diff --git a/Cargo.lock b/Cargo.lock index 483777ce52b..fc188cf01b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3229,6 +3229,7 @@ dependencies = [ "serde", "serde_json", "thiserror 2.0.17", + "triedb", "walkdir", ] @@ -3523,6 +3524,7 @@ dependencies = [ "reth-ethereum", "reth-ethereum-payload-builder", "reth-payload-builder", + "reth-provider", "reth-tracing", "serde", "thiserror 2.0.17", @@ -3590,6 +3592,7 @@ dependencies = [ "reth-optimism-flashblocks", "reth-optimism-forks", "reth-payload-builder", + "reth-provider", "reth-rpc-api", "reth-rpc-engine-api", "revm", @@ -4166,6 +4169,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generator" version = "0.8.7" @@ -7769,6 +7781,7 @@ dependencies = [ "roaring", "serde", "test-fuzz", + "triedb", ] [[package]] @@ -7916,6 +7929,7 @@ dependencies = [ "reth-chainspec", "reth-config", "reth-consensus", + "reth-db", "reth-ethereum-primitives", "reth-metrics", "reth-network-p2p", @@ -9836,6 +9850,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-trie", "assert_matches", "dashmap 6.1.0", "eyre", @@ -9866,6 +9881,7 @@ dependencies = [ "reth-storage-errors", "reth-testing-utils", "reth-trie", + "reth-trie-common", "reth-trie-db", "revm-database", "revm-database-interface", @@ -9874,6 +9890,7 @@ dependencies = [ "tempfile", "tokio", "tracing", + "triedb", ] [[package]] @@ -10584,6 +10601,7 @@ dependencies = [ "reth-static-file-types", "revm-database-interface", "thiserror 2.0.17", + "triedb", ] [[package]] @@ -10819,11 +10837,14 @@ dependencies = [ "reth-provider", "reth-trie", "reth-trie-common", + "reth-trie-sparse", "revm", "revm-database", "serde_json", "similar-asserts", + "tempfile", "tracing", + "triedb", "triehash", ] @@ -10852,6 +10873,7 @@ dependencies = [ "reth-trie-common", "reth-trie-db", "reth-trie-sparse", + "tempfile", "thiserror 2.0.17", "tokio", "tracing", @@ -10919,6 +10941,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-triedb" +version = "1.9.3" +dependencies = [ + "alloy-primitives", + "alloy-trie", + "reth-db-api", + "reth-primitives-traits", + "reth-provider", + "reth-storage-api", + "reth-storage-errors", + "reth-trie", + "reth-trie-common", + "reth-trie-db", + "tempfile", + "tokio", + "tracing", + "triedb", +] + [[package]] name = "reth-zstd-compressors" version = "1.9.3" @@ -11562,6 +11604,17 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sealed" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22f968c5ea23d555e670b449c1c5e7b2fc399fdaec1d304a17cd48e288abc107" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "sec1" version = "0.7.3" @@ -12990,6 +13043,27 @@ dependencies = [ "syn 2.0.108", ] +[[package]] +name = "triedb" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-trie", + "arrayvec", + "fxhash", + "memmap2", + "metrics", + "metrics-derive", + "parking_lot", + "proptest", + "proptest-derive 0.6.0", + "rayon", + "sealed", + "static_assertions", + 
"zerocopy", +] + [[package]] name = "triehash" version = "0.8.4" diff --git a/Cargo.toml b/Cargo.toml index 4e27936d255..eac78f357a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -141,6 +141,7 @@ members = [ "crates/trie/sparse", "crates/trie/sparse-parallel/", "crates/trie/trie", + "crates/triedb/", "examples/beacon-api-sidecar-fetcher/", "examples/beacon-api-sse/", "examples/bsc-p2p", @@ -461,6 +462,7 @@ reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } reth-trie-sparse = { path = "crates/trie/sparse", default-features = false } reth-trie-sparse-parallel = { path = "crates/trie/sparse-parallel" } +reth-triedb = { path = "crates/triedb" } reth-zstd-compressors = { path = "crates/storage/zstd-compressors", default-features = false } reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 5b8cfce7716..cc50679ce21 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -21,7 +21,7 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, }; use reth_provider::{ - providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider}, + providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider, TrieDbProvider}, ProviderFactory, StaticFileProviderFactory, }; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; @@ -69,10 +69,12 @@ impl EnvironmentArgs { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let db_path = data_dir.db(); let sf_path = data_dir.static_files(); + let tdb_path = data_dir.triedb(); if access.is_read_write() { reth_fs_util::create_dir_all(&db_path)?; reth_fs_util::create_dir_all(&sf_path)?; + reth_fs_util::create_dir_all(&tdb_path)?; } let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); @@ -91,7 +93,7 @@ impl EnvironmentArgs { config.stages.era = config.stages.era.with_datadir(data_dir.data_dir()); } - info!(target: "reth::cli", ?db_path, ?sf_path, "Opening storage"); + info!(target: "reth::cli", ?db_path, ?sf_path, ?tdb_path, "Opening storage"); let (db, sfp) = match access { AccessRights::RW => ( Arc::new(init_db(db_path, self.db.database_args())?), @@ -102,8 +104,9 @@ impl EnvironmentArgs { StaticFileProvider::read_only(sf_path, false)?, ), }; + let tdb = TrieDbProvider::open(tdb_path)?; - let provider_factory = self.create_provider_factory(&config, db, sfp)?; + let provider_factory = self.create_provider_factory(&config, db, sfp, tdb)?; if access.is_read_write() { debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(&provider_factory)?; @@ -122,6 +125,7 @@ impl EnvironmentArgs { config: &Config, db: Arc, static_file_provider: StaticFileProvider, + triedb_provider: TrieDbProvider, ) -> eyre::Result>>> where C: ChainSpecParser, @@ -132,6 +136,7 @@ impl EnvironmentArgs { db, self.chain.clone(), static_file_provider, + triedb_provider, ) .with_prune_modes(prune_modes.clone()); diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 9e8e68e9800..c7a86317b1e 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -9,7 +9,7 @@ use reth_evm::ConfigureEvm; use reth_node_builder::NodeTypesWithDB; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ - 
providers::{ProviderNodeTypes, StaticFileProvider}, + providers::{ProviderNodeTypes, StaticFileProvider, TrieDbProvider}, DatabaseProviderFactory, ProviderFactory, }; use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput}; @@ -42,6 +42,7 @@ where Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, + TrieDbProvider::open(output_datadir.triedb())?, ), to, from, diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 8b9ba5e937e..b04efba0c40 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -6,7 +6,7 @@ use reth_db_api::{database::Database, table::TableImporter, tables}; use reth_db_common::DbTool; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ - providers::{ProviderNodeTypes, StaticFileProvider}, + providers::{ProviderNodeTypes, StaticFileProvider, TrieDbProvider}, DatabaseProviderFactory, ProviderFactory, }; use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; @@ -39,6 +39,7 @@ pub(crate) async fn dump_hashing_account_stage bool { - self.has_enough_parallelism && !self.legacy_state_root + // self.has_enough_parallelism && !self.legacy_state_root + false } /// Setter for prewarm max concurrency. diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index ca8a93df079..a1da2f3d8c5 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -30,7 +30,7 @@ use reth_payload_primitives::{ use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::{ BlockReader, DatabaseProviderFactory, HashedPostStateProvider, ProviderError, StateProviderBox, - StateProviderFactory, StateReader, TransactionVariant, TrieReader, + StateProviderFactory, StateReader, TransactionVariant, TrieDbTxProvider, TrieReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; @@ -316,7 +316,7 @@ where + Clone + 'static,

::Provider: - BlockReader, + BlockReader + TrieDbTxProvider, C: ConfigureEvm + 'static, T: PayloadTypes>, V: EngineValidator, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index d1f7531e9dd..8d0e8699192 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -26,7 +26,9 @@ use reth_evm::{ ConfigureEvm, EvmEnvFor, OnStateHook, SpecFor, TxEnvFor, }; use reth_primitives_traits::NodePrimitives; -use reth_provider::{BlockReader, DatabaseProviderROFactory, StateProviderFactory, StateReader}; +use reth_provider::{ + BlockReader, DatabaseProviderROFactory, StateProviderFactory, StateReader, TrieDbTxProvider, +}; use reth_revm::{db::BundleState, state::EvmState}; use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use reth_trie_parallel::{ @@ -204,8 +206,9 @@ where ) -> PayloadHandle, I::Tx>, I::Error> where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, - F: DatabaseProviderROFactory - + Clone + F: DatabaseProviderROFactory< + Provider: TrieCursorFactory + HashedCursorFactory + TrieDbTxProvider, + > + Clone + Send + 'static, { @@ -700,7 +703,7 @@ mod tests { use reth_provider::{ providers::{BlockchainProvider, OverlayStateProviderFactory}, test_utils::create_test_provider_factory_with_chain_spec, - ChainSpecProvider, HashingWriter, + ChainSpecProvider, HashingWriter, TrieDbTxProvider, }; use reth_testing_utils::generators; use reth_trie::{test_utils::state_root, HashedPostState}; @@ -879,6 +882,8 @@ mod tests { } } + let root_from_triedb = factory.provider().unwrap().triedb_tx().state_root(); + let mut payload_processor = PayloadProcessor::new( WorkloadExecutor::default(), EthEvmConfig::new(factory.chain_spec()), @@ -910,5 +915,9 @@ mod tests { root_from_task, root_from_regular, "State root mismatch: task={root_from_task}, base={root_from_regular}" ); + assert_eq!( + root_from_task, root_from_triedb, + "State root mismatch: task={root_from_task}, triedb={root_from_triedb}" + ); } } diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index ec6ac71a459..c2122a3bf42 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -35,7 +35,7 @@ use reth_provider::{ providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockReader, DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider, StateProviderFactory, StateReader, - StateRootProvider, TrieReader, + StateRootProvider, TrieDbTxProvider, TrieReader, }; use reth_revm::db::State; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; @@ -129,7 +129,11 @@ impl BasicEngineValidator where N: NodePrimitives, P: DatabaseProviderFactory< - Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + Provider: BlockReader + + TrieReader + + StageCheckpointReader + + PruneCheckpointReader + + TrieDbTxProvider, > + BlockReader

+ StateProviderFactory + StateReader @@ -998,7 +1002,11 @@ pub trait EngineValidator< impl EngineValidator for BasicEngineValidator where P: DatabaseProviderFactory< - Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, + Provider: BlockReader + + TrieReader + + StageCheckpointReader + + PruneCheckpointReader + + TrieDbTxProvider, > + BlockReader
+ StateProviderFactory + StateReader diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index fa81d70e61f..1e60f752abd 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -35,7 +35,9 @@ use reth_node_builder::{ BuilderContext, DebugNode, Node, NodeAdapter, PayloadBuilderConfig, }; use reth_payload_primitives::PayloadTypes; -use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; +use reth_provider::{ + providers::ProviderFactoryBuilder, DatabaseProviderFactory, EthStorage, TrieDbTxProvider, +}; use reth_rpc::{ eth::core::{EthApiFor, EthRpcConverterFor}, ValidationApi, @@ -372,6 +374,7 @@ where impl Node for EthereumNode where N: FullNodeTypes, + ::Provider: TrieDbTxProvider, { type ComponentsBuilder = ComponentsBuilder< N, @@ -394,7 +397,11 @@ where } } -impl> DebugNode for EthereumNode { +impl DebugNode for EthereumNode +where + N: FullNodeComponents, + ::Provider: TrieDbTxProvider, +{ type RpcBlock = alloy_rpc_types_eth::Block; fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_ethereum_primitives::Block { diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 75f8ea9bac4..6e1c895f263 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -15,7 +15,7 @@ use reth_ethereum_engine_primitives::EthPayloadBuilderAttributes; use reth_ethereum_primitives::TxType; use reth_node_api::NodeTypesWithDBAdapter; use reth_node_ethereum::EthereumNode; -use reth_provider::FullProvider; +use reth_provider::{DatabaseProviderFactory, FullProvider, TrieDbTxProvider}; /// Helper function to create a new eth payload attributes pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { @@ -38,6 +38,7 @@ pub(crate) async fn advance_with_random_transactions( ) -> eyre::Result<()> where Provider: FullProvider>, + ::Provider: TrieDbTxProvider, { let provider = ProviderBuilder::new().connect_http(node.rpc_url()); let signers = Wallet::new(1).with_chain_id(provider.get_chain_id().await?).wallet_gen(); diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 0305da323d0..5a152985ea3 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -20,7 +20,9 @@ use futures_util::FutureExt; use reth_chainspec::{ChainSpec, MAINNET}; use reth_consensus::test_utils::TestConsensus; use reth_db::{ - test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, + test_utils::{ + create_test_rw_db, create_test_static_files_dir, create_test_triedb_dir, TempDatabase, + }, DatabaseEnv, }; use reth_db_common::init::init_genesis; @@ -50,8 +52,8 @@ use reth_node_ethereum::{ use reth_payload_builder::noop::NoopPayloadBuilderService; use reth_primitives_traits::{Block as _, RecoveredBlock}; use reth_provider::{ - providers::{BlockchainProvider, StaticFileProvider}, - BlockReader, EthStorage, ProviderFactory, + providers::{BlockchainProvider, StaticFileProvider, TrieDbProvider}, + BlockReader, DatabaseProviderFactory, EthStorage, ProviderFactory, TrieDbTxProvider, }; use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; @@ -123,6 +125,7 @@ impl NodeTypes for TestNode { impl Node for TestNode where N: FullNodeTypes, + ::Provider: TrieDbTxProvider, { type ComponentsBuilder = ComponentsBuilder< N, @@ -239,11 +242,13 @@ pub async fn test_exex_context_with_chain_spec( let consensus = Arc::new(TestConsensus::default()); let 
(static_dir, _) = create_test_static_files_dir(); + let (triedb_dir, _) = create_test_triedb_dir(); let db = create_test_rw_db(); let provider_factory = ProviderFactory::>::new( db, chain_spec.clone(), StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), + TrieDbProvider::open(triedb_dir.keep()).expect("triedb provider"), ); let genesis_hash = init_genesis(&provider_factory)?; diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 056d809d02f..05ba5f543ee 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -62,6 +62,7 @@ reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-tracing.workspace = true +reth-db = { workspace = true } assert_matches.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 5d6bd3cf7f8..202b9526108 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -621,8 +621,16 @@ mod tests { }; use alloy_primitives::B256; use assert_matches::assert_matches; + use reth_chainspec::MAINNET; use reth_consensus::test_utils::TestConsensus; - use reth_provider::test_utils::create_test_provider_factory; + use reth_db::test_utils::{ + create_test_rw_db, create_test_static_files_dir, create_test_triedb_dir, + }; + use reth_provider::{ + providers::{StaticFileProvider, TrieDbProvider}, + test_utils::{create_test_provider_factory, MockNodeTypesWithDB}, + ProviderFactory, + }; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; use std::collections::HashMap; @@ -631,7 +639,14 @@ mod tests { #[tokio::test] async fn streams_bodies_in_order() { // Generate some random blocks - let factory = create_test_provider_factory(); + let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); + let factory = ProviderFactory::::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), + ); let (headers, mut bodies) = generate_bodies(0..=19); insert_headers(&factory, &headers); @@ -660,7 +675,14 @@ mod tests { #[tokio::test] async fn requests_correct_number_of_times() { // Generate some random blocks - let factory = create_test_provider_factory(); + let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); + let factory = ProviderFactory::::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), + ); let mut rng = generators::rng(); let blocks = random_block_range( &mut rng, @@ -697,7 +719,14 @@ mod tests { #[tokio::test] async fn streams_bodies_in_order_after_range_reset() { // Generate some random blocks - let factory = create_test_provider_factory(); + let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); + let factory = ProviderFactory::::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), + ); let (headers, mut bodies) = 
generate_bodies(0..=99); insert_headers(&factory, &headers); @@ -734,7 +763,14 @@ mod tests { #[tokio::test] async fn can_download_new_range_after_termination() { // Generate some random blocks - let factory = create_test_provider_factory(); + let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); + let factory = ProviderFactory::::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), + ); let (headers, mut bodies) = generate_bodies(0..=199); insert_headers(&factory, &headers); @@ -771,7 +807,14 @@ mod tests { #[tokio::test] async fn can_download_after_exceeding_limit() { // Generate some random blocks - let factory = create_test_provider_factory(); + let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); + let factory = ProviderFactory::::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), + ); let (headers, mut bodies) = generate_bodies(0..=199); insert_headers(&factory, &headers); @@ -803,7 +846,14 @@ mod tests { #[tokio::test] async fn can_tolerate_empty_responses() { // Generate some random blocks - let factory = create_test_provider_factory(); + let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); + let factory = ProviderFactory::::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), + ); let (headers, mut bodies) = generate_bodies(0..=99); insert_headers(&factory, &headers); diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 95909e34710..dfd7efd7450 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -65,7 +65,7 @@ use reth_node_metrics::{ version::VersionInfo, }; use reth_provider::{ - providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, + providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider, TrieDbProvider}, BlockHashReader, BlockNumReader, ProviderError, ProviderFactory, ProviderResult, StageCheckpointReader, StaticFileProviderFactory, }; @@ -468,6 +468,7 @@ where self.right().clone(), self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, + TrieDbProvider::open(self.data_dir().triedb())?, ) .with_prune_modes(self.prune_modes()) .with_static_files_metrics(); diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index a66d7b222e4..760f0773fd0 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -23,6 +23,7 @@ use reth_node_core::{ version::{version_metadata, CLIENT_CODE}, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadStore}; +use reth_provider::{DatabaseProviderFactory, TrieDbTxProvider}; use reth_rpc::eth::{core::EthRpcConverterFor, DevSigner, EthApiTypes, FullEthApiServer}; use reth_rpc_api::{eth::helpers::EthTransactions, IntoEngineApiRpcModule}; use reth_rpc_builder::{ @@ -1292,6 +1293,7 @@ where <::Payload as PayloadTypes>::ExecutionData, >, >, + ::Provider: TrieDbTxProvider, EV: PayloadValidatorBuilder, EV::Validator: reth_engine_primitives::PayloadValidator< ::Payload, diff --git 
a/crates/node/core/src/dirs.rs b/crates/node/core/src/dirs.rs index 4f8507c4e68..4e57b3e8c09 100644 --- a/crates/node/core/src/dirs.rs +++ b/crates/node/core/src/dirs.rs @@ -301,6 +301,13 @@ impl ChainPath { } } + /// Returns the path to the TrieDB directory for this chain. + /// + /// `//triedb` + pub fn triedb(&self) -> PathBuf { + self.data_dir().join("triedb") + } + /// Returns the path to the reth p2p secret key for this chain. /// /// `//discovery-secret` diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 65055eb6717..46cb0b3824a 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -55,7 +55,10 @@ use reth_optimism_txpool::{ supervisor::{SupervisorClient, DEFAULT_SUPERVISOR_URL}, OpPooledTx, }; -use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions}; +use reth_provider::{ + providers::ProviderFactoryBuilder, CanonStateSubscriptions, DatabaseProviderFactory, + TrieDbTxProvider, +}; use reth_rpc_api::{eth::RpcTypes, DebugApiServer, L2EthApiExtServer}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -234,6 +237,7 @@ impl OpNode { impl Node for OpNode where N: FullNodeTypes, + ::Provider: TrieDbTxProvider, { type ComponentsBuilder = ComponentsBuilder< N, @@ -264,6 +268,7 @@ where impl DebugNode for OpNode where N: FullNodeComponents, + ::Provider: TrieDbTxProvider, { type RpcBlock = alloy_rpc_types_eth::Block; diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 1666e79baf3..5864c6ae67a 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -259,7 +259,8 @@ where Primitives: NodePrimitives, > + StatsReader + BlockHashReader - + StateWriter::Receipt>, + + StateWriter::Receipt> + + reth_provider::TrieDbTxProvider, { /// Return the id of the stage fn id(&self) -> StageId { diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 1e48f2d38e0..112554121bb 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -204,6 +204,7 @@ where let (key, value) = item?; hashed_account_cursor .append(RawKey::::from_vec(key), &RawValue::::from_vec(value))?; + // TODO: insert into TrieDB as well } } else { // Aggregate all transition changesets and make a list of accounts that have been diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 3fe1c7f1f97..12307ce3827 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,7 +1,10 @@ use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::MAINNET; use reth_db::{ - test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir}, + test_utils::{ + create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir, + create_test_triedb_dir, + }, DatabaseEnv, }; use reth_db_api::{ @@ -17,7 +20,7 @@ use reth_db_api::{ use reth_ethereum_primitives::{Block, EthPrimitives, Receipt}; use reth_primitives_traits::{Account, SealedBlock, SealedHeader, StorageEntry}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, + providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter, TrieDbProvider}, test_utils::MockNodeTypesWithDB, 
HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, StatsReader, }; @@ -38,12 +41,15 @@ impl Default for TestStageDB { /// Create a new instance of [`TestStageDB`] fn default() -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); + Self { temp_static_files_dir: static_dir, factory: ProviderFactory::new( create_test_rw_db(), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), ), } } @@ -52,6 +58,7 @@ impl Default for TestStageDB { impl TestStageDB { pub fn new(path: &Path) -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); Self { temp_static_files_dir: static_dir, @@ -59,6 +66,7 @@ impl TestStageDB { create_test_rw_db_with_path(path), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), ), } } diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index bd77b9d63d7..68851cbbd31 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -22,6 +22,10 @@ reth-prune-types = { workspace = true, features = ["serde", "reth-codec"] } reth-storage-errors.workspace = true reth-trie-common.workspace = true +# triedb +#triedb = { git = "https://github.com/base/triedb" } +triedb = { path = "/home/andrea/src/triedb" } + # ethereum alloy-primitives.workspace = true alloy-genesis.workspace = true diff --git a/crates/storage/db-api/src/lib.rs b/crates/storage/db-api/src/lib.rs index f39b2c49708..557b4461d63 100644 --- a/crates/storage/db-api/src/lib.rs +++ b/crates/storage/db-api/src/lib.rs @@ -94,3 +94,5 @@ pub use database::Database; mod unwind; pub use unwind::DbTxUnwindExt; + +pub mod triedb; diff --git a/crates/storage/db-api/src/triedb.rs b/crates/storage/db-api/src/triedb.rs new file mode 100644 index 00000000000..9054e540d49 --- /dev/null +++ b/crates/storage/db-api/src/triedb.rs @@ -0,0 +1,46 @@ +//! TrieDB transaction and cursor traits for interacting with the trie database. +//! +//! This module provides traits for reading and writing to the trie database, +//! including support for account and storage slot operations. + +use alloy_primitives::StorageValue; +use reth_storage_errors::db::DatabaseError; +use triedb::{ + account::Account, + path::{AddressPath, StoragePath}, +}; + +/// Trait for reading and writing to the trie database. +/// Provides methods for getting account and storage data, and committing changes. +pub trait TrieDbTx: Send + Sync { + /// Get an account by its address path. + fn get_account(&self, address_path: AddressPath) -> Result, DatabaseError>; + + /// Get a storage slot value by its storage path. + fn get_storage_slot( + &self, + storage_path: StoragePath, + ) -> Result, DatabaseError>; + + /// Commit any pending changes to the database. + fn commit(self) -> Result<(), DatabaseError>; +} + +/// Trait for read-write operations on the trie database. +/// Extends TrieDbTx with methods for modifying account and storage data. +pub trait TrieDbTxRW: TrieDbTx { + /// Set an account at the given address path. + fn set_account( + &self, + address_path: AddressPath, + account: Option, + ) -> Result<(), DatabaseError>; + /// Set a storage slot value at the given storage path. 
+ fn set_storage_slot( + &self, + storage_path: StoragePath, + value: Option, + ) -> Result<(), DatabaseError>; + /// Apply all pending changes to the database. + fn apply_changes(&self) -> Result<(), DatabaseError>; +} diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index de55cea3c99..58acac62bbf 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -17,7 +17,7 @@ use reth_provider::{ BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, - TrieWriter, + TrieDbProviderFactory, TrieDbTxProvider, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -90,7 +90,8 @@ where + StaticFileProviderFactory> + ChainSpecProvider + StageCheckpointReader - + BlockHashReader, + + BlockHashReader + + TrieDbProviderFactory, PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter + HistoryWriter @@ -98,6 +99,7 @@ where + HashingWriter + StateWriter + TrieWriter + + TrieDbTxProvider + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { @@ -723,6 +725,7 @@ mod tests { fn fail_init_inconsistent_db() { let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone()); let static_file_provider = factory.static_file_provider(); + let triedb_provider = factory.triedb_provider(); init_genesis(&factory).unwrap(); // Try to init db with a different genesis block @@ -730,6 +733,7 @@ mod tests { factory.into_db(), MAINNET.clone(), static_file_provider, + triedb_provider, )); assert!(matches!( diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index a6306723847..2b851201170 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -159,6 +159,14 @@ pub mod test_utils { (temp_dir, path) } + /// Create `triedb` path for testing + #[track_caller] + pub fn create_test_triedb_dir() -> (TempDir, PathBuf) { + let temp_dir = TempDir::with_prefix("reth-test-triedb-").expect(ERROR_TEMPDIR); + let path = temp_dir.path().to_path_buf(); + (temp_dir, path) + } + /// Get a temporary directory path to use for the database pub fn tempdir_path() -> PathBuf { let builder = tempfile::Builder::new().prefix("reth-test-").rand_bytes(8).tempdir(); diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index ac390343c50..958b087e83e 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -25,6 +25,10 @@ alloy-rlp.workspace = true derive_more.workspace = true thiserror.workspace = true +# triedb +#triedb = { git = "https://github.com/base/triedb" } +triedb = { path = "/home/andrea/src/triedb" } + revm-database-interface.workspace = true [features] diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs index eca6cd47a45..74b080b05a1 100644 --- a/crates/storage/errors/src/lib.rs +++ b/crates/storage/errors/src/lib.rs @@ -23,3 +23,6 @@ pub use provider::{ProviderError, ProviderResult}; /// Any error pub mod any; + +/// TrieDB error +pub mod triedb; diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index ed5230c18fb..43cdc97ab09 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,4 +1,4 @@ -use crate::{any::AnyError, db::DatabaseError}; +use crate::{any::AnyError, db::DatabaseError, triedb::TrieDBError}; use alloc::{boxed::Box, string::String}; use alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; @@ -139,6 +139,9 @@ pub enum ProviderError { /// The available range of blocks with changesets available: core::ops::RangeInclusive, }, + /// Error opening a read-only or read-write transaction from TrieDB + #[error(transparent)] + TrieDB(#[from] TrieDBError), /// Any other error type wrapped into a cloneable [`AnyError`]. 
#[error(transparent)] Other(#[from] AnyError), diff --git a/crates/storage/errors/src/triedb.rs b/crates/storage/errors/src/triedb.rs new file mode 100644 index 00000000000..0e03ab69718 --- /dev/null +++ b/crates/storage/errors/src/triedb.rs @@ -0,0 +1,40 @@ +use crate::ProviderError; +use triedb::transaction::TransactionError; + +impl From for ProviderError { + fn from(err: TransactionError) -> Self { + TrieDBError::TransactionError(err.to_string()).into() + } +} + +impl From for ProviderError { + fn from(err: triedb::database::Error) -> Self { + TrieDBError::DatabaseError(format!("{:?}", err)).into() + } +} + +impl From for ProviderError { + fn from(err: triedb::database::OpenError) -> Self { + TrieDBError::OpenError(format!("{:?}", err)).into() + } +} + +/// TrieDB error type. +#[derive(Clone, Debug, thiserror::Error)] +pub enum TrieDBError { + /// Error opening TrieDB + #[error("received triedb database open error: {_0}")] + OpenError(String), + /// Error opening a read-only or read-write transaction from TrieDB + #[error("received triedb database error: {_0}")] + DatabaseError(String), + /// Error opening a read-only or read-write transaction from TrieDB + #[error("received triedb transaction error: {_0}")] + TransactionError(String), + /// Attempting to write using a read-only transaction + #[error("received reth triedb read-only write error")] + ReadOnlyWriteError, + /// An assertion check failed + #[error("triedb assertion failed: {_0}")] + AssertionError(String), +} diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index e8599a89706..fa914fe7f63 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -27,6 +27,7 @@ reth-prune-types.workspace = true reth-stages-types.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } +reth-trie-common.workspace = true reth-nippy-jar.workspace = true reth-codecs.workspace = true reth-chain-state.workspace = true @@ -37,9 +38,14 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true revm-database.workspace = true revm-state = { workspace = true, optional = true } +# triedb +#triedb = { git = "https://github.com/base/triedb" } +triedb = { path = "/home/andrea/src/triedb" } + # tracing tracing.workspace = true diff --git a/crates/storage/provider/src/providers/database/builder.rs b/crates/storage/provider/src/providers/database/builder.rs index 4bc8569432e..81e0d8ae5a8 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -3,7 +3,10 @@ //! This also includes general purpose staging types that provide builder style functions that lead //! up to the intended build target. 
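// For context on the error plumbing introduced above: `ProviderError` gains a
// `TrieDB` variant and the raw `triedb` errors are stringified on conversion, so
// `?` lifts them straight into provider results while the error stays cloneable.
// A minimal sketch using only types added in this patch; the read-only guard
// itself is illustrative, not an API of either crate.
use reth_storage_errors::{triedb::TrieDBError, ProviderError};

// A write attempted through a read-only TrieDB transaction surfaces as
// `ProviderError::TrieDB` via the `#[from]` conversion added to the enum above.
fn guard_write(read_only: bool) -> Result<(), ProviderError> {
    if read_only {
        return Err(TrieDBError::ReadOnlyWriteError.into());
    }
    Ok(())
}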
-use crate::{providers::StaticFileProvider, ProviderFactory}; +use crate::{ + providers::{StaticFileProvider, TrieDbProvider}, + ProviderFactory, +}; use reth_db::{ mdbx::{DatabaseArguments, MaxReadTransactionDuration}, open_db_read_only, DatabaseEnv, @@ -105,12 +108,14 @@ impl ProviderFactoryBuilder { where N: NodeTypes, { - let ReadOnlyConfig { db_dir, db_args, static_files_dir, watch_static_files } = + let ReadOnlyConfig { db_dir, db_args, static_files_dir, triedb_dir, watch_static_files } = config.into(); Ok(self .db(Arc::new(open_db_read_only(db_dir, db_args)?)) .chainspec(chainspec) .static_file(StaticFileProvider::read_only(static_files_dir, watch_static_files)?) + // TODO: remove unwrap + .triedb(TrieDbProvider::open(triedb_dir).unwrap()) .build_provider_factory()) } } @@ -133,6 +138,8 @@ pub struct ReadOnlyConfig { pub db_args: DatabaseArguments, /// The path to the static file dir pub static_files_dir: PathBuf, + /// The path to the triedb dir + pub triedb_dir: PathBuf, /// Whether the static files should be watched for changes. pub watch_static_files: bool, } @@ -152,7 +159,7 @@ impl ReadOnlyConfig { /// [`StaticFileProvider::read_only`] pub fn from_datadir(datadir: impl AsRef) -> Self { let datadir = datadir.as_ref(); - Self::from_dirs(datadir.join("db"), datadir.join("static_files")) + Self::from_dirs(datadir.join("db"), datadir.join("static_files"), datadir.join("triedb")) } /// Disables long-lived read transaction safety guarantees. @@ -181,13 +188,10 @@ impl ReadOnlyConfig { /// If the path does not exist pub fn from_db_dir(db_dir: impl AsRef) -> Self { let db_dir = db_dir.as_ref(); - let static_files_dir = std::fs::canonicalize(db_dir) - .unwrap() - .parent() - .unwrap() - .to_path_buf() - .join("static_files"); - Self::from_dirs(db_dir, static_files_dir) + let base_dir = std::fs::canonicalize(db_dir).unwrap().parent().unwrap().to_path_buf(); + let static_files_dir = base_dir.join("static_files"); + let triedb_dir = base_dir.join("triedb"); + Self::from_dirs(db_dir, static_files_dir, triedb_dir) } /// Creates the config for the given paths. @@ -195,9 +199,14 @@ impl ReadOnlyConfig { /// /// By default this watches the static file directory for changes, see also /// [`StaticFileProvider::read_only`] - pub fn from_dirs(db_dir: impl AsRef, static_files_dir: impl AsRef) -> Self { + pub fn from_dirs( + db_dir: impl AsRef, + static_files_dir: impl AsRef, + triedb_dir: impl AsRef, + ) -> Self { Self { static_files_dir: static_files_dir.as_ref().into(), + triedb_dir: triedb_dir.as_ref().into(), db_dir: db_dir.as_ref().into(), db_args: Default::default(), watch_static_files: true, @@ -280,16 +289,6 @@ impl TypesAnd2 { Self { _types: Default::default(), val_1, val_2 } } - /// Returns the first value. - pub const fn val_1(&self) -> &Val1 { - &self.val_1 - } - - /// Returns the second value. - pub const fn val_2(&self) -> &Val2 { - &self.val_2 - } - /// Configures the [`StaticFileProvider`]. pub fn static_file( self, @@ -316,16 +315,57 @@ impl TypesAnd3 { pub fn new(val_1: Val1, val_2: Val2, val_3: Val3) -> Self { Self { _types: Default::default(), val_1, val_2, val_3 } } + + /// Returns the first value. + pub const fn val_1(&self) -> &Val1 { + &self.val_1 + } + + /// Returns the second value. + pub const fn val_2(&self) -> &Val2 { + &self.val_2 + } + + /// Returns the second value. + pub const fn val_3(&self) -> &Val3 { + &self.val_3 + } + + /// Configures the [`TrieDbProvider`]. 
+ pub fn triedb( + self, + triedb_provider: TrieDbProvider, + ) -> TypesAnd4 +where { + TypesAnd4::new(self.val_1, self.val_2, self.val_3, triedb_provider) + } +} + +/// This is staging type that contains the configured types and _four_ values. +#[derive(Debug)] +pub struct TypesAnd4 { + _types: PhantomData, + val_1: Val1, + val_2: Val2, + val_3: Val3, + val_4: Val4, +} + +impl TypesAnd4 { + /// Creates a new instance with the given types and four values. + pub fn new(val_1: Val1, val_2: Val2, val_3: Val3, val_4: Val4) -> Self { + Self { _types: Default::default(), val_1, val_2, val_3, val_4 } + } } -impl TypesAnd3, StaticFileProvider> +impl TypesAnd4, StaticFileProvider, TrieDbProvider> where N: NodeTypes, DB: Database + DatabaseMetrics + Clone + Unpin + 'static, { /// Creates the [`ProviderFactory`]. pub fn build_provider_factory(self) -> ProviderFactory> { - let Self { _types, val_1, val_2, val_3 } = self; - ProviderFactory::new(val_1, val_2, val_3) + let Self { _types, val_1, val_2, val_3, val_4 } = self; + ProviderFactory::new(val_1, val_2, val_3, val_4) } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 873b10b0cfc..b025ad8c491 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -1,11 +1,11 @@ use crate::{ - providers::{state::latest::LatestStateProvider, StaticFileProvider}, + providers::{state::latest::LatestStateProvider, StaticFileProvider, TrieDbProvider}, to_range, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, + TransactionVariant, TransactionsProvider, TrieDbProviderFactory, }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::BlockHashOrNumber; @@ -60,6 +60,8 @@ pub struct ProviderFactory { chain_spec: Arc, /// Static File Provider static_file_provider: StaticFileProvider, + /// TrieDB Provider + triedb_provider: TrieDbProvider, /// Optional pruning configuration prune_modes: PruneModes, /// The node storage handler. 
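// For orientation: the read-only entry points above now resolve three directories
// instead of two. A minimal sketch of the resulting config, assuming `ReadOnlyConfig`
// is re-exported from the providers module like the other builder types; the datadir
// path is illustrative only.
use reth_provider::providers::ReadOnlyConfig; // assumed re-export path

fn read_only_dirs() {
    // `<datadir>/db`, `<datadir>/static_files` and, new in this patch, `<datadir>/triedb`.
    let config = ReadOnlyConfig::from_datadir("/path/to/datadir/mainnet");
    assert!(config.db_dir.ends_with("db"));
    assert!(config.static_files_dir.ends_with("static_files"));
    assert!(config.triedb_dir.ends_with("triedb"));
}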
@@ -79,11 +81,13 @@ impl ProviderFactory { db: N::DB, chain_spec: Arc, static_file_provider: StaticFileProvider, + triedb_provider: TrieDbProvider, ) -> Self { Self { db, chain_spec, static_file_provider, + triedb_provider, prune_modes: PruneModes::default(), storage: Default::default(), } @@ -121,11 +125,13 @@ impl>> ProviderFactory { chain_spec: Arc, args: DatabaseArguments, static_file_provider: StaticFileProvider, + triedb_provider: TrieDbProvider, ) -> RethResult { Ok(Self { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, + triedb_provider, prune_modes: PruneModes::default(), storage: Default::default(), }) @@ -145,6 +151,7 @@ impl ProviderFactory { self.db.tx()?, self.chain_spec.clone(), self.static_file_provider.clone(), + self.triedb_provider.tx()?, self.prune_modes.clone(), self.storage.clone(), )) @@ -160,6 +167,7 @@ impl ProviderFactory { self.db.tx_mut()?, self.chain_spec.clone(), self.static_file_provider.clone(), + self.triedb_provider.tx_mut()?, self.prune_modes.clone(), self.storage.clone(), ))) @@ -221,6 +229,13 @@ impl StaticFileProviderFactory for ProviderFactory { } } +impl TrieDbProviderFactory for ProviderFactory { + /// Returns TrieDB provider + fn triedb_provider(&self) -> TrieDbProvider { + self.triedb_provider.clone() + } +} + impl HeaderSyncGapProvider for ProviderFactory { type Header = HeaderTy; fn local_tip_header( @@ -545,11 +560,13 @@ where N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; + let Self { db, chain_spec, static_file_provider, triedb_provider, prune_modes, storage } = + self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) + .field("triedb_provider", &triedb_provider) .field("prune_modes", &prune_modes) .field("storage", &storage) .finish() @@ -562,6 +579,7 @@ impl Clone for ProviderFactory { db: self.db.clone(), chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), + triedb_provider: self.triedb_provider.clone(), prune_modes: self.prune_modes.clone(), storage: self.storage.clone(), } @@ -582,7 +600,7 @@ mod tests { use reth_chainspec::ChainSpecBuilder; use reth_db::{ mdbx::DatabaseArguments, - test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, + test_utils::{create_test_static_files_dir, create_test_triedb_dir, ERROR_TEMPDIR}, }; use reth_db_api::tables; use reth_primitives_traits::SignerRecoverable; @@ -621,11 +639,13 @@ mod tests { fn provider_factory_with_database_path() { let chain_spec = ChainSpecBuilder::mainnet().build(); let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_triedb_dir, triedb_dir_path) = create_test_triedb_dir(); let factory = ProviderFactory::>::new_with_database_path( tempfile::TempDir::new().expect(ERROR_TEMPDIR).keep(), Arc::new(chain_spec), DatabaseArguments::new(Default::default()), StaticFileProvider::read_write(static_dir_path).unwrap(), + TrieDbProvider::open(triedb_dir_path).unwrap(), ) .unwrap(); let provider = factory.provider().unwrap(); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 1f0a0aa391a..64a30345b24 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -5,7 +5,8 @@ use crate::{ providers::{ 
database::{chain::ChainStorage, metrics}, static_file::StaticFileWriter, - NodeTypesForProvider, StaticFileProvider, + triedb::triedb_account_to_reth, + NodeTypesForProvider, StaticFileProvider, TrieDbTransaction, }, to_range, traits::{ @@ -18,7 +19,7 @@ use crate::{ OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieReader, TrieWriter, + TransactionsProviderExt, TrieDbTxProvider, TrieReader, TrieWriter, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta, TxHashRef}, @@ -84,6 +85,7 @@ use std::{ sync::Arc, }; use tracing::{debug, trace}; +use triedb::path::{AddressPath, StoragePath}; /// A [`DatabaseProvider`] that holds a read-only database transaction. pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; @@ -149,6 +151,8 @@ pub struct DatabaseProvider { chain_spec: Arc, /// Static File provider static_file_provider: StaticFileProvider, + /// TrieDB transaction. + triedb_tx: TrieDbTransaction, /// Pruning configuration prune_modes: PruneModes, /// Node storage handler. @@ -179,7 +183,7 @@ impl DatabaseProvider { if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProviderRef::new(self))) + return Ok(Box::new(LatestStateProviderRef::new(self))); } // +1 as the changeset that we want is the one that was applied after this block. @@ -230,6 +234,20 @@ impl StaticFileProviderFactory for DatabaseProvider { } } +impl TrieDbTxProvider for DatabaseProvider { + fn triedb_tx_ref(&self) -> &TrieDbTransaction { + &self.triedb_tx + } + + fn triedb_tx(&mut self) -> &mut TrieDbTransaction { + &mut self.triedb_tx + } + + fn into_triedb_tx(self) -> TrieDbTransaction { + self.triedb_tx + } +} + impl> ChainSpecProvider for DatabaseProvider { @@ -246,10 +264,11 @@ impl DatabaseProvider { tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, + triedb_tx: TrieDbTransaction, prune_modes: PruneModes, storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, triedb_tx, prune_modes, storage } } } @@ -389,7 +408,7 @@ impl TryIntoHistoricalStateProvider for Databa // if the block number is the same as the currently best block number on disk we can use the // latest state provider here if block_number == self.best_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProvider::new(self))) + return Ok(Box::new(LatestStateProvider::new(self))); } // +1 as the changeset that we want is the one that was applied after this block. @@ -453,7 +472,7 @@ where while let Some((sharded_key, list)) = item { // If the shard does not belong to the key, break. if !shard_belongs_to_key(&sharded_key) { - break + break; } // Always delete the current shard from the database first @@ -492,10 +511,11 @@ impl DatabaseProvider { tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, + triedb_tx: TrieDbTransaction, prune_modes: PruneModes, storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { tx, chain_spec, static_file_provider, triedb_tx, prune_modes, storage } } /// Consume `DbTx` or `DbTxMut`. 
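// A sketch of how downstream code reaches the new transaction: any factory whose
// read-only provider implements `TrieDbTxProvider` (as `DatabaseProvider` now does
// above) can hand out the TrieDB transaction alongside the MDBX one. Names follow
// this patch; the helper itself is illustrative.
use reth_provider::{DatabaseProviderFactory, ProviderError, TrieDbTxProvider};

fn with_triedb_tx<F>(factory: &F) -> Result<(), ProviderError>
where
    F: DatabaseProviderFactory,
    F::Provider: TrieDbTxProvider,
{
    let provider = factory.database_provider_ro()?;
    // Read-only access to the trie database, tied to this provider's lifetime.
    let _triedb_tx = provider.triedb_tx_ref();
    Ok(())
}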
@@ -582,7 +602,7 @@ impl DatabaseProvider { F: FnMut(H, BodyTy, Range) -> ProviderResult, { if range.is_empty() { - return Ok(Vec::new()) + return Ok(Vec::new()); } let len = range.end().saturating_sub(*range.start()) as usize; @@ -771,7 +791,7 @@ impl DatabaseProvider { // delete old shard so new one can be inserted. cursor.delete_current()?; let list = list.iter().collect::>(); - return Ok(list) + return Ok(list); } Ok(Vec::new()) } @@ -818,7 +838,15 @@ impl DatabaseProvider { impl AccountReader for DatabaseProvider { fn basic_account(&self, address: &Address) -> ProviderResult> { - Ok(self.tx.get_by_encoded_key::(address)?) + tracing::trace!( + "DatabaseProvider::basic_account: Reading account from TrieDB at {}", + address + ); + let address_path = AddressPath::for_address(*address); + match self.triedb_tx.get_account(address_path)? { + Some(account) => Ok(Some(triedb_account_to_reth(&account))), + None => Ok(None), + } } } @@ -840,11 +868,16 @@ impl AccountExtReader for DatabaseProvider { &self, iter: impl IntoIterator, ) -> ProviderResult)>> { - let mut plain_accounts = self.tx.cursor_read::()?; - Ok(iter - .into_iter() - .map(|address| plain_accounts.seek_exact(address).map(|a| (address, a.map(|(_, v)| v)))) - .collect::, _>>()?) + tracing::debug!("DatabaseProvider::basic_accounts: Reading accounts from TrieDB"); + iter.into_iter() + .map(|address| { + let address_path = AddressPath::for_address(address); + match self.triedb_tx.get_account(address_path)? { + Some(account) => Ok((address, Some(triedb_account_to_reth(&account)))), + None => Ok((address, None)), + } + }) + .collect::>>() } fn changed_accounts_and_blocks_with_range( @@ -943,7 +976,7 @@ impl HeaderSyncGapProvider } Ordering::Less => { // There's either missing or corrupted files. - return Err(ProviderError::HeaderNotFound(next_static_file_block_num.into())) + return Err(ProviderError::HeaderNotFound(next_static_file_block_num.into())); } Ordering::Equal => {} } @@ -1509,18 +1542,21 @@ impl StorageReader for DatabaseProvider &self, addresses_with_keys: impl IntoIterator)>, ) -> ProviderResult)>> { - let mut plain_storage = self.tx.cursor_dup_read::()?; - + tracing::debug!("DatabaseProvider::plain_state_storages: Reading storage from TrieDB"); addresses_with_keys .into_iter() .map(|(address, storage)| { + let hashed_address = keccak256(address); storage .into_iter() .map(|key| -> ProviderResult<_> { - Ok(plain_storage - .seek_by_key_subkey(address, key)? - .filter(|v| v.key == key) - .unwrap_or_else(|| StorageEntry { key, value: Default::default() })) + let address_path = AddressPath::new(Nibbles::unpack(hashed_address)); + let storage_path = + StoragePath::for_address_path_and_slot(address_path, key); + match self.triedb_tx.get_storage_slot(storage_path)? 
{ + Some(value) => Ok(StorageEntry { key, value }), + None => Ok(StorageEntry { key, value: Default::default() }), + } }) .collect::>>() .map(|storage| (address, storage)) @@ -1651,7 +1687,7 @@ impl StateWriter .receipts .is_some_and(|mode| mode.should_prune(block_number, tip)) { - continue + continue; } // If there are new addresses to retain after this block number, track them @@ -1667,7 +1703,7 @@ impl StateWriter has_contract_log_filter && !receipt.logs().iter().any(|log| allowed_addresses.contains(&log.address)) { - continue + continue; } if let Some(writer) = &mut receipts_static_writer { @@ -1762,15 +1798,18 @@ impl StateWriter // Write new account state tracing::trace!(len = changes.accounts.len(), "Writing new account state"); + // Database + TrieDB hybrid backend: write to both let mut accounts_cursor = self.tx_ref().cursor_write::()?; - // write account to database. for (address, account) in changes.accounts { + let hashed_address = keccak256(address); if let Some(account) = account { tracing::trace!(?address, "Updating plain state account"); - accounts_cursor.upsert(address, &account.into())?; + accounts_cursor.upsert(address, &account.clone().into())?; + self.triedb_tx.set_account(hashed_address, Some(account.into()))?; } else if accounts_cursor.seek_exact(address)?.is_some() { tracing::trace!(?address, "Deleting plain state account"); accounts_cursor.delete_current()?; + self.triedb_tx.set_account(hashed_address, None)?; } } @@ -1783,6 +1822,7 @@ impl StateWriter // Write new storage state and wipe storage if needed. tracing::trace!(len = changes.storage.len(), "Writing new storage state"); + // Database + TrieDB hybrid backend: write to both let mut storages_cursor = self.tx_ref().cursor_dup_write::()?; for PlainStorageChangeset { address, wipe_storage, storage } in changes.storage { // Wiping of storage. @@ -1797,6 +1837,8 @@ impl StateWriter // sort storage slots by key. storage.par_sort_unstable_by_key(|a| a.key); + let hashed_address = keccak256(address); + for entry in storage { tracing::trace!(?address, ?entry.key, "Updating plain state storage"); if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? && @@ -1805,8 +1847,15 @@ impl StateWriter storages_cursor.delete_current()?; } - if !entry.value.is_zero() { + if entry.value.is_zero() { + self.triedb_tx.set_storage_slot(hashed_address, keccak256(entry.key), None)?; + } else { storages_cursor.upsert(address, &entry)?; + self.triedb_tx.set_storage_slot( + hashed_address, + keccak256(entry.key), + Some(entry.value), + )?; } } } @@ -1909,30 +1958,40 @@ impl StateWriter // iterate over local plain state remove all account and all storages. for (address, (old_account, new_account, storage)) in &state { // revert account if needed. + let hashed_address = keccak256(address); if old_account != new_account { let existing_entry = plain_accounts_cursor.seek_exact(*address)?; if let Some(account) = old_account { plain_accounts_cursor.upsert(*address, account)?; + self.triedb_tx.set_account(hashed_address, Some(*account))?; } else if existing_entry.is_some() { plain_accounts_cursor.delete_current()?; + self.triedb_tx.set_account(hashed_address, None)?; } } // revert storages for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; + let hashed_storage_key = keccak256(*storage_key); // delete previous value if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? 
.filter(|s| s.key == *storage_key) .is_some() { - plain_storage_cursor.delete_current()? + plain_storage_cursor.delete_current()?; + self.triedb_tx.set_storage_slot(hashed_address, hashed_storage_key, None)?; } // insert value if needed if !old_storage_value.is_zero() { plain_storage_cursor.upsert(*address, &storage_entry)?; + self.triedb_tx.set_storage_slot( + hashed_address, + hashed_storage_key, + Some(*old_storage_value), + )?; } } } @@ -1970,7 +2029,7 @@ impl StateWriter let range = block + 1..=self.last_block_number()?; if range.is_empty() { - return Ok(ExecutionOutcome::default()) + return Ok(ExecutionOutcome::default()); } let start_block_number = *range.start(); @@ -2007,30 +2066,40 @@ impl StateWriter // iterate over local plain state remove all account and all storages. for (address, (old_account, new_account, storage)) in &state { // revert account if needed. + let hashed_address = keccak256(address); if old_account != new_account { let existing_entry = plain_accounts_cursor.seek_exact(*address)?; if let Some(account) = old_account { plain_accounts_cursor.upsert(*address, account)?; + self.triedb_tx.set_account(hashed_address, Some(*account))?; } else if existing_entry.is_some() { plain_accounts_cursor.delete_current()?; + self.triedb_tx.set_account(hashed_address, None)?; } } // revert storages for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; + let hashed_storage_key = keccak256(*storage_key); // delete previous value if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? .filter(|s| s.key == *storage_key) .is_some() { - plain_storage_cursor.delete_current()? + plain_storage_cursor.delete_current()?; + self.triedb_tx.set_storage_slot(hashed_address, hashed_storage_key, None)?; } // insert value if needed if !old_storage_value.is_zero() { plain_storage_cursor.upsert(*address, &storage_entry)?; + self.triedb_tx.set_storage_slot( + hashed_address, + hashed_storage_key, + Some(*old_storage_value), + )?; } } } @@ -2089,7 +2158,7 @@ impl TrieWriter for DatabaseProvider /// Returns the number of entries modified. fn write_trie_updates_sorted(&self, trie_updates: &TrieUpdatesSorted) -> ProviderResult { if trie_updates.is_empty() { - return Ok(0) + return Ok(0); } // Track the number of inserted entries. 
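// Restating the write convention used in the state-write hunks above against the
// `TrieDbTxRW` trait from `reth-db-api`: a zero storage value is passed as `None`
// (a delete) rather than written explicitly. Sketch only; the `StoragePath`
// construction mirrors the read paths elsewhere in this patch, and the generic
// parameters of the trait methods follow the definitions added there.
use alloy_primitives::{Address, StorageValue, B256};
use reth_db_api::triedb::TrieDbTxRW;
use reth_storage_errors::db::DatabaseError;
use triedb::path::{AddressPath, StoragePath};

fn write_slot<T: TrieDbTxRW>(
    tx: &T,
    address: Address,
    key: B256,
    value: StorageValue,
) -> Result<(), DatabaseError> {
    let path = StoragePath::for_address_path_and_slot(AddressPath::for_address(address), key);
    tx.set_storage_slot(path, (!value.is_zero()).then_some(value))
}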
@@ -2507,8 +2576,10 @@ impl HashingWriter for DatabaseProvi for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { hashed_accounts_cursor.upsert(*hashed_address, account)?; + self.triedb_tx.set_account(*hashed_address, Some(*account))?; } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; + self.triedb_tx.set_account(*hashed_address, None)?; } } Ok(hashed_accounts) @@ -2592,10 +2663,12 @@ impl HashingWriter for DatabaseProvi .is_some() { hashed_storage_cursor.delete_current()?; + self.triedb_tx.set_storage_slot(hashed_address, key, None)?; } if !value.is_zero() { hashed_storage_cursor.upsert(hashed_address, &StorageEntry { key, value })?; + self.triedb_tx.set_storage_slot(hashed_address, key, Some(value))?; } Ok(()) }) @@ -2998,7 +3071,7 @@ impl BlockWrite ) -> ProviderResult<()> { if blocks.is_empty() { debug!(target: "providers::db", "Attempted to append empty block range"); - return Ok(()) + return Ok(()); } // Blocks are not empty, so no need to handle the case of `blocks.first()` being @@ -3144,11 +3217,13 @@ impl DBProvider for DatabaseProvider // truncate the static files according to the // checkpoints on the next start-up. if self.static_file_provider.has_unwind_queued() { + self.triedb_tx.commit()?; self.tx.commit()?; self.static_file_provider.commit()?; } else { self.static_file_provider.commit()?; self.tx.commit()?; + self.triedb_tx.commit()?; } Ok(true) diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 41e8121991b..3f189cbcc75 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -13,6 +13,10 @@ pub use static_file::{ StaticFileProviderRWRefMut, StaticFileWriter, }; +#[allow(missing_docs)] +pub mod triedb; +pub use triedb::{TrieDbProvider, TrieDbTransaction}; + mod state; pub use state::{ historical::{HistoricalStateProvider, HistoricalStateProviderRef, LowestAvailableBlocks}, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 666138fae7b..577894619de 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,7 @@ use crate::{ - providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - ChangeSetReader, HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, + providers::{state::macros::delegate_provider_impls, triedb::triedb_account_to_reth}, + AccountReader, BlockHashReader, ChangeSetReader, HashedPostStateProvider, ProviderError, + StateProvider, StateRootProvider, TrieDbTxProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; @@ -30,6 +31,7 @@ use reth_trie_db::{ }; use std::fmt::Debug; +use triedb::path::{AddressPath, StoragePath}; /// State provider for a given block number which takes a tx reference. /// @@ -241,7 +243,7 @@ impl HistoricalStateProviderRef<'_, Provi } } -impl AccountReader +impl AccountReader for HistoricalStateProviderRef<'_, Provider> { /// Get basic account information. @@ -259,7 +261,11 @@ impl AccountReader .map(|account_before| account_before.info) } HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { - Ok(self.tx().get_by_encoded_key::(address)?) 
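+                // Plain-state reads now go through TrieDB: resolve the account by its
+                // hashed-address path and convert the TrieDB account back into a reth one.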
+ let address_path = AddressPath::for_address(*address); + match self.provider.triedb_tx_ref().get_account(address_path)? { + Some(account) => Ok(Some(triedb_account_to_reth(&account))), + None => Ok(None), + } } } } @@ -396,8 +402,9 @@ impl HashedPostStateProvider for HistoricalStateProviderRef<'_, } } -impl StateProvider - for HistoricalStateProviderRef<'_, Provider> +impl< + Provider: DBProvider + TrieDbTxProvider + BlockNumReader + BlockHashReader + ChangeSetReader, + > StateProvider for HistoricalStateProviderRef<'_, Provider> { /// Get storage. fn storage( @@ -419,13 +426,15 @@ impl })? .value, )), - HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self - .tx() - .cursor_dup_read::()? - .seek_by_key_subkey(address, storage_key)? - .filter(|entry| entry.key == storage_key) - .map(|entry| entry.value) - .or(Some(StorageValue::ZERO))), + HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { + let address_path = AddressPath::for_address(address); + let storage_path = + StoragePath::for_address_path_and_slot(address_path, storage_key); + match self.provider.triedb_tx_ref().get_storage_slot(storage_path)? { + Some(value) => Ok(Some(value)), + None => Ok(Some(StorageValue::ZERO)), + } + } } } } @@ -487,7 +496,7 @@ impl HistoricalStateProvider { } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + TrieDbTxProvider + BlockNumReader + BlockHashReader + ChangeSetReader]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. @@ -523,6 +532,7 @@ mod tests { providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, test_utils::create_test_provider_factory, AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, + TrieDbTxProvider, }; use alloy_primitives::{address, b256, Address, B256, U256}; use reth_db_api::{ @@ -545,7 +555,7 @@ mod tests { const fn assert_state_provider() {} #[expect(dead_code)] const fn assert_historical_state_provider< - T: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader, + T: DBProvider + TrieDbTxProvider + BlockNumReader + BlockHashReader + ChangeSetReader, >() { assert_state_provider::>(); } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 092feb37c43..42ae916f1a0 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,9 +1,10 @@ use crate::{ - providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - HashedPostStateProvider, StateProvider, StateRootProvider, + providers::{state::macros::delegate_provider_impls, triedb::triedb_account_to_reth}, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProvider, StateRootProvider, + TrieDbTxProvider, }; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; -use reth_db_api::{cursor::DbDupCursorRO, tables, transaction::DbTx}; +use reth_db_api::{tables, transaction::DbTx}; use reth_primitives_traits::{Account, Bytecode}; use reth_storage_api::{BytecodeReader, DBProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -18,6 +19,7 @@ use reth_trie_db::{ DatabaseProof, 
DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, }; +use triedb::path::{AddressPath, StoragePath}; /// State provider over latest state that takes tx reference. /// @@ -36,10 +38,19 @@ impl<'b, Provider: DBProvider> LatestStateProviderRef<'b, Provider> { } } -impl AccountReader for LatestStateProviderRef<'_, Provider> { +impl AccountReader + for LatestStateProviderRef<'_, Provider> +{ /// Get basic account information. fn basic_account(&self, address: &Address) -> ProviderResult> { - self.tx().get_by_encoded_key::(address).map_err(Into::into) + tracing::warn!( + "LatestStateProviderRef::basic_account: Reading account from TrieDB at {}", + address + ); + self.0 + .triedb_tx_ref() + .get_account(AddressPath::for_address(*address)) + .map(|account| account.map(|account| triedb_account_to_reth(&account))) } } @@ -150,7 +161,7 @@ impl HashedPostStateProvider for LatestStateProvide } } -impl StateProvider +impl StateProvider for LatestStateProviderRef<'_, Provider> { /// Get storage. @@ -159,13 +170,13 @@ impl StateProvider account: Address, storage_key: StorageKey, ) -> ProviderResult> { - let mut cursor = self.tx().cursor_dup_read::()?; - if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? && - entry.key == storage_key - { - return Ok(Some(entry.value)) - } - Ok(None) + tracing::warn!( + "LatestStateProviderRef::storage: Reading storage from TrieDB at {}", + account + ); + self.0 + .triedb_tx_ref() + .get_storage_slot(StoragePath::for_address_and_slot(account, storage_key)) } } @@ -196,7 +207,7 @@ impl LatestStateProvider { } // Delegates all provider impls to [LatestStateProviderRef] -delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader ]); +delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader + TrieDbTxProvider ]); #[cfg(test)] mod tests { @@ -204,7 +215,7 @@ mod tests { const fn assert_state_provider() {} #[expect(dead_code)] - const fn assert_latest_state_provider() { + const fn assert_latest_state_provider() { assert_state_provider::>(); } } diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index d3ef87e6c49..7466e674a1b 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -1,3 +1,4 @@ +use crate::{providers::TrieDbTransaction, TrieDbTxProvider}; use alloy_primitives::{BlockNumber, B256}; use reth_db_api::DatabaseError; use reth_errors::{ProviderError, ProviderResult}; @@ -323,3 +324,20 @@ where hashed_cursor_factory.hashed_storage_cursor(hashed_address) } } + +impl TrieDbTxProvider for OverlayStateProvider +where + Provider: DBProvider + TrieDbTxProvider, +{ + fn triedb_tx_ref(&self) -> &TrieDbTransaction { + self.provider.triedb_tx_ref() + } + + fn triedb_tx(&mut self) -> &mut TrieDbTransaction { + self.provider.triedb_tx() + } + + fn into_triedb_tx(self) -> TrieDbTransaction { + self.provider.into_triedb_tx() + } +} diff --git a/crates/storage/provider/src/providers/triedb/mod.rs b/crates/storage/provider/src/providers/triedb/mod.rs new file mode 100644 index 00000000000..31a44f07bf5 --- /dev/null +++ b/crates/storage/provider/src/providers/triedb/mod.rs @@ -0,0 +1,569 @@ +mod root; + +pub use root::TrieDbOverlayStateRoot; + +use std::{ + collections::BTreeMap, + fs::create_dir_all, + ops::Deref, + path::Path, + sync::{Arc, Mutex}, +}; + +use alloy_primitives::{Address, StorageValue, B256, U256}; +use 
alloy_trie::{Nibbles, EMPTY_ROOT_HASH}; +use reth_db::triedb::TrieDbTxRW; + +use crate::errors::triedb::TrieDBError as RethTrieDBError; +use reth_db_api::triedb::TrieDbTx; +use reth_primitives_traits::Account as RethAccount; +use reth_storage_errors::{ + db::DatabaseError, + provider::{ProviderError, ProviderResult}, +}; + +use alloy_consensus::constants::KECCAK_EMPTY; +use triedb::{ + account::Account as TrieDBAccount, + database::{begin_ro, begin_rw}, + overlay::OverlayState, + path::{AddressPath, StoragePath}, + storage::overlay_root::OverlayedRoot, + transaction::{Transaction, RO, RW}, + Database, +}; + +// Account type conversion utilities between Reth and TrieDB +// We can't use From traits due to orphan rules, so we provide conversion functions +pub fn reth_account_to_triedb(reth_account: &RethAccount) -> TrieDBAccount { + TrieDBAccount { + nonce: reth_account.nonce, + balance: reth_account.balance, + storage_root: EMPTY_ROOT_HASH, // Default empty root for accounts without storage + code_hash: reth_account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + } +} + +pub fn triedb_account_to_reth(triedb_account: &TrieDBAccount) -> RethAccount { + RethAccount { + nonce: triedb_account.nonce, + balance: triedb_account.balance, + bytecode_hash: if triedb_account.code_hash == KECCAK_EMPTY { + None + } else { + Some(triedb_account.code_hash) + }, + } +} + +#[derive(Debug, Clone)] +pub struct TrieDbProvider { + inner: Arc, +} + +impl TrieDbProvider { + /// Creates a new [`TrieDBProvider`]. + pub fn open(path: impl AsRef) -> ProviderResult { + let database_file_path = path.as_ref().join("triedb.dat"); + let file_exists = database_file_path.exists(); + let db = if file_exists { + Database::open(database_file_path.to_str().expect("Path must be valid UTF-8"))? + } else { + if !path.as_ref().exists() { + create_dir_all(path).expect("unable to create directory for triedb"); + } + Database::create_new(database_file_path.to_str().expect("Path must be valid UTF-8")) + .map_err(|e| { + ProviderError::Database(DatabaseError::Other(format!( + "TrieDB creation failed: {:?}", + e + ))) + })? + }; + Ok(Self { inner: Arc::new(db) }) + } + + /// Returns a read-only TrieDB transaction. + pub fn tx(&self) -> ProviderResult { + let tx = begin_ro(self.inner.clone())?; + Ok(TrieDbTransaction { inner: Mutex::new(TrieDbTransactionInner::RO(tx)) }) + } + + /// Returns a read-write TrieDB transaction. + pub fn tx_mut(&self) -> ProviderResult { + let tx = begin_rw(self.inner.clone())?; + Ok(TrieDbTransaction { inner: Mutex::new(TrieDbTransactionInner::RW(tx)) }) + } +} + +#[derive(Debug)] +enum TrieDbTransactionInner> { + RO(Transaction), + RW(Transaction), +} + +// TODO: can be represented as separate read / write traits. 
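+// `TrieDbTransaction` wraps either variant behind a `Mutex` so the provider can hand out
+// `&self` accessors while the underlying TrieDB transaction methods require mutable access.
+//
+// Rough usage sketch (hypothetical path; note `apply_changes` is still a stub in this revision):
+//
+//   let provider = TrieDbProvider::open("/path/to/datadir/triedb")?;
+//   let tx = provider.tx_mut()?;
+//   tx.set_account_address(address, Some(account))?;
+//   tx.apply_changes()?;
+//   tx.commit()?;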
+#[derive(Debug)] +pub struct TrieDbTransaction { + inner: Mutex>>, +} + +impl TrieDbTransaction { + pub fn get_account(&self, address_path: AddressPath) -> ProviderResult> { + let account = match &mut *self.inner.lock().unwrap() { + TrieDbTransactionInner::RO(tx) => tx.get_account(&address_path)?, + TrieDbTransactionInner::RW(tx) => tx.get_account(&address_path)?, + }; + + Ok(account) + } + + pub fn get_storage_slot( + &self, + storage_path: StoragePath, + ) -> ProviderResult> { + let storage = match &mut *self.inner.lock().unwrap() { + TrieDbTransactionInner::RO(tx) => tx.get_storage_slot(&storage_path)?, + TrieDbTransactionInner::RW(tx) => tx.get_storage_slot(&storage_path)?, + }; + + Ok(storage) + } + + pub fn apply_changes(&self) -> ProviderResult<()> { + match &mut *self.inner.lock().unwrap() { + TrieDbTransactionInner::RW(_tx) => { + // TODO: apply changes + // tx.apply_changes()?; + } + TrieDbTransactionInner::RO(_) => { + // Read-only transactions don't have changes to apply + } + } + Ok(()) + } + + pub fn commit(self) -> ProviderResult<()> { + match self.inner.into_inner().unwrap() { + TrieDbTransactionInner::RO(tx) => { + tx.commit()?; + } + TrieDbTransactionInner::RW(tx) => { + tx.commit()?; + } + }; + + Ok(()) + } + + pub fn set_account( + &self, + hashed_address: B256, + account: Option, + ) -> ProviderResult<(AddressPath, Option)> { + // TODO: cleaner way to handle all this? e.g. Supporting `From` for Reth (or Revm) account + // in TrieDB account. + let address_path = AddressPath::new(Nibbles::unpack(hashed_address)); + + let triedb_account_option: Option = + account.as_ref().map(reth_account_to_triedb); + match &mut *self.inner.lock().unwrap() { + TrieDbTransactionInner::RW(tx) => { + tx.set_account(address_path.clone(), triedb_account_option.clone())? + } + + _ => return Err(RethTrieDBError::ReadOnlyWriteError.into()), + } + + Ok((address_path, triedb_account_option)) + } + + pub fn set_account_address( + &self, + address: Address, + account: Option, + ) -> ProviderResult<(AddressPath, Option)> { + // TODO: cleaner way to handle all this? e.g. Supporting `From` for Reth (or Revm) account + // in TrieDB account. + let address_path = AddressPath::for_address(address); + + let triedb_account_option: Option = + account.as_ref().map(reth_account_to_triedb); + match &mut *self.inner.lock().unwrap() { + TrieDbTransactionInner::RW(tx) => { + tx.set_account(address_path.clone(), triedb_account_option.clone())? 
+ } + + _ => return Err(RethTrieDBError::ReadOnlyWriteError.into()), + } + + Ok((address_path, triedb_account_option)) + } + + pub fn set_storage_slot( + &self, + hashed_address: B256, + key: B256, + value: Option, + ) -> ProviderResult<(StoragePath, Option)> { + let address_path = AddressPath::new(Nibbles::unpack(hashed_address)); + let storage_path = + StoragePath::for_address_path_and_slot_hash(address_path, Nibbles::unpack(key)); + + match &mut *self.inner.lock().unwrap() { + TrieDbTransactionInner::RW(tx) => tx.set_storage_slot(storage_path.clone(), value)?, + + _ => return Err(RethTrieDBError::ReadOnlyWriteError.into()), + } + + Ok((storage_path, value)) + } + + pub fn state_root(&self) -> B256 { + let state_root = match &*self.inner.lock().unwrap() { + TrieDbTransactionInner::RO(tx) => tx.state_root(), + TrieDbTransactionInner::RW(tx) => tx.state_root(), + }; + + state_root + } + + pub fn compute_root_with_overlay( + &self, + overlay_state: OverlayState, + ) -> ProviderResult { + let overlayed_root = match &*self.inner.lock().unwrap() { + TrieDbTransactionInner::RO(tx) => tx.compute_root_with_overlay(overlay_state)?, + TrieDbTransactionInner::RW(tx) => tx.compute_root_with_overlay(overlay_state)?, + }; + + Ok(overlayed_root) + } + + pub fn assert_state_root(&self, expected_state_root: B256) -> ProviderResult<()> { + let triedb_state_root = self.state_root(); + if triedb_state_root != expected_state_root { + return Err(RethTrieDBError::AssertionError(format!( + "state root mismatch: expected: {:?}, got: {:?}", + expected_state_root, triedb_state_root + )) + .into()); + } + + Ok(()) + } + + pub fn assert_db( + &self, + address: Address, + expected_account: TrieDBAccount, + expected_storage: BTreeMap, + ) -> ProviderResult<()> { + let address_path = AddressPath::for_address(address); + let db_account = self.get_account(address_path.clone())?; + if db_account.is_none() { + return Err(RethTrieDBError::AssertionError(format!( + "account in database is none. address: {:?}", + address + )) + .into()); + } + + let db_account = db_account.unwrap(); + + if db_account.balance != expected_account.balance { + return Err(RethTrieDBError::AssertionError(format!( + "account in database has a different balance: expected: {:?}, got: {:?}", + expected_account.balance, db_account.balance + )) + .into()); + } + + if db_account.nonce != expected_account.nonce { + return Err(RethTrieDBError::AssertionError(format!( + "account in database has a different nonce: expected: {:?}, got: {:?}", + expected_account.nonce, db_account.nonce + )) + .into()); + } + + if db_account.code_hash != expected_account.code_hash { + return Err(RethTrieDBError::AssertionError(format!( + "account in database has a different code hash: expected: {:?}, got: {:?}", + expected_account.code_hash, db_account.code_hash + )) + .into()); + } + + for (slot, value) in &expected_storage { + let storage_path = + StoragePath::for_address_path_and_slot(address_path.clone(), (*slot).into()); + let storage_value = self.get_storage_slot(storage_path.clone())?; + if storage_value.is_none() { + println!("assert address storage path is {:?}", storage_path.clone()); + return Err(RethTrieDBError::AssertionError(format!( + "account storage in database is none. address: {:?}, slot: {:?}", + address, slot + )) + .into()); + } + + let storage_value = storage_value.unwrap(); + + if storage_value != *value { + return Err(RethTrieDBError::AssertionError(format!( + "account storage in database has a different value for slot: {:?}. 
expected: {:?}, got: {:?}", + slot, value, storage_value + )) + .into()); + } + } + + Ok(()) + } + + /* + TODO: re-enable proofs + + /// Get account with proof using TrieDB's native proof generation + pub fn get_account_with_proof( + &self, + address_path: AddressPath, + ) -> ProviderResult> { + let mut inner = self.inner.lock().unwrap(); + match &mut *inner { + TrieDbTransactionInner::RO(ref mut tx) => { + match tx.get_account_with_proof(address_path) { + Ok(Some((account, proof))) => { + let reth_account = triedb_account_to_reth(account); + Ok(Some((reth_account, proof))) + } + Ok(None) => Ok(None), + Err(e) => Err(ProviderError::Database(DatabaseError::Other(e.to_string()))), + } + } + TrieDbTransactionInner::RW(ref mut tx) => { + match tx.get_account_with_proof(address_path) { + Ok(Some((account, proof))) => { + let reth_account = triedb_account_to_reth(account); + Ok(Some((reth_account, proof))) + } + Ok(None) => Ok(None), + Err(e) => Err(ProviderError::Database(DatabaseError::Other(e.to_string()))), + } + } + } + } + + /// Get storage with proof using TrieDB's native proof generation + pub fn get_storage_with_proof( + &self, + storage_path: StoragePath, + ) -> ProviderResult> { + let mut inner = self.inner.lock().unwrap(); + match &mut *inner { + TrieDbTransactionInner::RO(ref mut tx) => { + match tx.get_storage_with_proof(storage_path) { + Ok(result) => Ok(result), + Err(e) => Err(ProviderError::Database(DatabaseError::Other(e.to_string()))), + } + } + TrieDbTransactionInner::RW(ref mut tx) => { + match tx.get_storage_with_proof(storage_path) { + Ok(result) => Ok(result), + Err(e) => Err(ProviderError::Database(DatabaseError::Other(e.to_string()))), + } + } + } + } + + */ +} + +impl TrieDbTx for TrieDbTransaction { + fn get_account( + &self, + address_path: AddressPath, + ) -> Result, DatabaseError> { + self.get_account(address_path).map_err(|e| DatabaseError::Other(e.to_string())) + } + + fn get_storage_slot( + &self, + storage_path: StoragePath, + ) -> Result, DatabaseError> { + self.get_storage_slot(storage_path).map_err(|e| DatabaseError::Other(e.to_string())) + } + + fn commit(self) -> Result<(), DatabaseError> { + self.commit().map_err(|e| DatabaseError::Other(e.to_string())) + } +} + +impl TrieDbTxRW for TrieDbTransaction { + fn set_account( + &self, + address_path: AddressPath, + account: Option, + ) -> Result<(), DatabaseError> { + match &mut *self.inner.lock().unwrap() { + TrieDbTransactionInner::RW(tx) => { + tx.set_account(address_path, account) + .map_err(|e| DatabaseError::Other(e.to_string()))?; + Ok(()) + } + _ => { + Err(DatabaseError::Other("Cannot set account on read-only transaction".to_string())) + } + } + } + + fn set_storage_slot( + &self, + storage_path: StoragePath, + value: Option, + ) -> Result<(), DatabaseError> { + match &mut *self.inner.lock().unwrap() { + TrieDbTransactionInner::RW(tx) => { + tx.set_storage_slot(storage_path, value) + .map_err(|e| DatabaseError::Other(e.to_string()))?; + Ok(()) + } + _ => { + Err(DatabaseError::Other("Cannot set storage on read-only transaction".to_string())) + } + } + } + + fn apply_changes(&self) -> Result<(), DatabaseError> { + self.apply_changes().map_err(|e| DatabaseError::Other(e.to_string())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{Address, B256, U256}; + use alloy_trie::Nibbles; + use tempfile::TempDir; + + fn create_test_triedb_provider() -> (TrieDbProvider, TempDir) { + let temp_dir = TempDir::new().expect("Failed to create temporary directory"); + let provider = + 
TrieDbProvider::open(temp_dir.path()).expect("Failed to create TrieDbProvider"); + (provider, temp_dir) + } + + #[test] + fn test_account_type_conversions() { + // Test Reth -> TrieDB conversion + let reth_account = RethAccount { + nonce: 42, + balance: U256::from(1000000), + bytecode_hash: Some(B256::from([1u8; 32])), + }; + + let triedb_account = reth_account_to_triedb(&reth_account); + assert_eq!(triedb_account.nonce, 42); + assert_eq!(triedb_account.balance, U256::from(1000000)); + assert_eq!(triedb_account.code_hash, B256::from([1u8; 32])); + assert_eq!(triedb_account.storage_root, EMPTY_ROOT_HASH); + + // Test TrieDB -> Reth conversion + let converted_back = triedb_account_to_reth(&triedb_account); + assert_eq!(converted_back.nonce, reth_account.nonce); + assert_eq!(converted_back.balance, reth_account.balance); + assert_eq!(converted_back.bytecode_hash, reth_account.bytecode_hash); + } + + #[test] + fn test_account_type_conversions_no_code() { + // Test conversion with no bytecode hash + let reth_account = RethAccount { nonce: 10, balance: U256::from(500), bytecode_hash: None }; + + let triedb_account = reth_account_to_triedb(&reth_account); + assert_eq!(triedb_account.code_hash, KECCAK_EMPTY); + + let converted_back = triedb_account_to_reth(&triedb_account); + assert_eq!(converted_back.bytecode_hash, None); + } + + #[test] + fn test_triedb_provider_creation() { + let (_provider, _temp_dir) = create_test_triedb_provider(); + // Provider creation should succeed without panicking + } + + #[test] + fn test_triedb_transaction_creation() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Test read-only transaction + let ro_tx = provider.tx().expect("Failed to create RO transaction"); + assert!(ro_tx.inner.lock().is_ok()); + + // Test read-write transaction + let rw_tx = provider.tx_mut().expect("Failed to create RW transaction"); + assert!(rw_tx.inner.lock().is_ok()); + } + + #[test] + fn test_triedb_account_operations() { + let (provider, _temp_dir) = create_test_triedb_provider(); + let tx = provider.tx_mut().expect("Failed to create RW transaction"); + + // Test account operations + let address = Address::from([0x42; 20]); + let account = RethAccount { nonce: 1, balance: U256::from(1000), bytecode_hash: None }; + + // Set account + let result = tx.set_account_address(address, Some(account)); + assert!(result.is_ok()); + + // Apply changes to make them visible in the trie + tx.apply_changes().expect("Failed to apply changes"); + + // Get account - should be visible after apply_changes + let address_path = AddressPath::for_address(address); + let retrieved = tx.get_account(address_path).expect("Failed to get account"); + assert!(retrieved.is_some(), "Account should be visible after applying changes"); + let retrieved_account = retrieved.unwrap(); + assert_eq!(retrieved_account.nonce, account.nonce); + assert_eq!(retrieved_account.balance, account.balance); + } + + #[test] + fn test_triedb_storage_operations() { + let (provider, _temp_dir) = create_test_triedb_provider(); + let tx = provider.tx_mut().expect("Failed to create RW transaction"); + + // Set up account first + let address = Address::from([0x42; 20]); + let account = RethAccount { nonce: 1, balance: U256::from(1000), bytecode_hash: None }; + tx.set_account_address(address, Some(account)).expect("Failed to set account"); + + // Test storage operations + let storage_key = B256::from([0x01; 32]); + let storage_value = U256::from(0x1234); + + // Apply changes for account to be visible + 
tx.apply_changes().expect("Failed to apply account changes"); + + // Set storage - use keccak hash of address + let hashed_address = alloy_primitives::keccak256(address.0); + tx.set_storage_slot(hashed_address, storage_key, Some(storage_value)) + .expect("Failed to set storage slot"); + + // Apply changes to make storage visible in the trie + tx.apply_changes().expect("Failed to apply storage changes"); + + // Get storage - should be visible after apply_changes + // Use the same path construction as in set_storage_slot + let address_path = AddressPath::new(Nibbles::unpack(hashed_address)); + let storage_path = + StoragePath::for_address_path_and_slot_hash(address_path, Nibbles::unpack(storage_key)); + let retrieved = tx.get_storage_slot(storage_path).expect("Failed to get storage"); + assert!(retrieved.is_some(), "Storage should be visible after applying changes"); + assert_eq!(retrieved.unwrap(), storage_value); + } +} diff --git a/crates/storage/provider/src/providers/triedb/root.rs b/crates/storage/provider/src/providers/triedb/root.rs new file mode 100644 index 00000000000..d4b6275cf35 --- /dev/null +++ b/crates/storage/provider/src/providers/triedb/root.rs @@ -0,0 +1,1309 @@ +use std::time::Instant; + +use alloy_primitives::{ + map::{B256Map, HashSet}, + B256, +}; +use alloy_trie::Nibbles; +use reth_trie::{ + updates::{StorageTrieUpdates, TrieUpdates}, + HashedPostState, TrieInput, +}; +use reth_trie_common::prefix_set::TriePrefixSets; +use triedb::overlay::{OverlayState, OverlayStateMut, OverlayValue}; + +use crate::providers::triedb::{reth_account_to_triedb, TrieDbTransaction}; +use reth_storage_errors::provider::ProviderError; + +#[derive(Debug)] +pub struct TrieDbOverlayStateRoot { + tx: TrieDbTransaction, + input_nodes: TrieUpdates, + input_state: HashedPostState, + input_prefix_sets: TriePrefixSets, +} + +impl TrieDbOverlayStateRoot { + pub fn new(tx: TrieDbTransaction, input: TrieInput) -> Self { + Self { + tx, + input_nodes: input.nodes, + input_state: input.state, + input_prefix_sets: input.prefix_sets.freeze(), + } + } + + pub fn incremental_root(self) -> Result { + let (root, _) = self.calculate(false)?; + Ok(root) + } + + pub fn incremental_root_with_updates(self) -> Result<(B256, TrieUpdates), ProviderError> { + self.calculate(true) + } + + fn calculate(self, _retain_updates: bool) -> Result<(B256, TrieUpdates), ProviderError> { + let start_time = Instant::now(); + let (overlay_state, removed_keys, mut removed_storage_keys) = + build_overlay_state(self.input_nodes, self.input_state, self.input_prefix_sets); + let overlay_time = start_time.elapsed(); + let overlayed_root = self.tx.compute_root_with_overlay(overlay_state)?; + let overlayed_root_time = start_time.elapsed(); + let (root, branch_updates, storage_updates) = ( + overlayed_root.root, + overlayed_root.updated_branch_nodes, + overlayed_root.storage_branch_updates, + ); + let mut trie_updates = TrieUpdates::default(); + trie_updates.account_nodes.extend(branch_updates); + for (hashed_address, storage_updates) in storage_updates { + trie_updates.insert_storage_updates( + hashed_address, + StorageTrieUpdates { + is_deleted: false, + storage_nodes: storage_updates, + removed_nodes: removed_storage_keys.remove(&hashed_address).unwrap_or_default(), + }, + ); + } + trie_updates.removed_nodes.extend(removed_keys); + let trie_updates_time = start_time.elapsed(); + tracing::debug!( + target: "TrieDBOverlayStateRoot::calculate", + overlay_time = ?overlay_time, + "Computed TrieDB overlay" + ); + tracing::debug!( + target: 
"TrieDBOverlayStateRoot::calculate", + overlayed_root_time = ?overlayed_root_time, + "Computed TrieDB overlayed root" + ); + tracing::debug!( + target: "TrieDBOverlayStateRoot::calculate", + trie_updates_time = ?trie_updates_time, + "Computed TrieDB trie updates" + ); + Ok((root, trie_updates)) + } +} + +fn build_overlay_state( + input_nodes: TrieUpdates, + input_state: HashedPostState, + mut input_prefix_sets: TriePrefixSets, +) -> (OverlayState, HashSet, B256Map>) { + let mut removed_keys = HashSet::default(); + let mut removed_storage_keys: B256Map> = B256Map::default(); + let input_nodes = input_nodes.into_sorted(); + let mut overlay_mut = OverlayStateMut::with_capacity( + input_nodes.account_nodes_ref().len() * 16 + input_nodes.storage_tries_ref().len() * 16, + ); + for (key, branch) in input_nodes.account_nodes_ref() { + if input_prefix_sets.account_prefix_set.contains(key) { + removed_keys.insert(*key); + continue; + } + let mut hash_idx = 0; + let mut path = *key; + for i in 0..16 { + if let Some(branch) = branch && + branch.hash_mask.is_bit_set(i) + { + path.push(i); + overlay_mut.insert(path.into(), Some(OverlayValue::Hash(branch.hashes[hash_idx]))); + hash_idx += 1; + path.pop(); + } + } + } + for (account, storage_updates) in input_nodes.storage_tries_ref() { + let mut storage_prefix_set = input_prefix_sets.storage_prefix_sets.get_mut(account); + for (key, branch) in &storage_updates.storage_nodes { + if let Some(ref mut prefix_set) = storage_prefix_set { + if prefix_set.contains(&key) { + removed_storage_keys.entry(*account).or_default().insert(*key); + continue; + } + } + let mut hash_idx = 0; + let mut path = Nibbles::unpack(account).join(&key); + for i in 0..16 { + if let Some(branch) = branch && + branch.hash_mask.is_bit_set(i) + { + path.push(i); + overlay_mut.insert( + path.clone().into(), + Some(OverlayValue::Hash(branch.hashes[hash_idx])), + ); + hash_idx += 1; + path.pop(); + } + } + } + } + for (key, node) in input_state.accounts { + if let Some(account) = node { + overlay_mut.insert( + Nibbles::unpack(key).into(), + Some(OverlayValue::Account(reth_account_to_triedb(&account))), + ); + } else { + overlay_mut.insert(Nibbles::unpack(key).into(), None); + } + } + for (account, storage_updates) in input_state.storages { + let account_path = Nibbles::unpack(account); + for (key, value) in storage_updates.storage.into_iter() { + overlay_mut.insert( + account_path.join(&Nibbles::unpack(key)).into(), + Some(OverlayValue::Storage(value)), + ); + } + } + (overlay_mut.freeze(), removed_keys, removed_storage_keys) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::providers::triedb::TrieDbProvider; + use alloy_primitives::{Address, B256, U256}; + use reth_primitives_traits::Account as RethAccount; + use reth_trie::{HashedPostState, HashedStorage, EMPTY_ROOT_HASH}; + use tempfile::TempDir; + use triedb::path::AddressPath; + + fn create_test_triedb_provider() -> (TrieDbProvider, TempDir) { + let temp_dir = TempDir::new().expect("Failed to create temporary directory"); + let provider = + TrieDbProvider::open(temp_dir.path()).expect("Failed to create TrieDbProvider"); + (provider, temp_dir) + } + + #[test] + fn test_state_root_operations() { + let (provider, _temp_dir) = create_test_triedb_provider(); + let tx = provider.tx().expect("Failed to create transaction"); + + // Test state root retrieval + let state_root = tx.state_root(); + assert_eq!(state_root.len(), 32); // Should be a valid B256 + + // Test state root assertion (should pass for empty trie) + let 
assert_result = tx.assert_state_root(state_root); + assert!(assert_result.is_ok()); + + // Test state root assertion with wrong root (should fail) + let wrong_root = B256::from([0xFF; 32]); + let assert_result = tx.assert_state_root(wrong_root); + assert!(assert_result.is_err()); + } + + #[test] + fn test_triedb_state_root_with_overlays() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Seed a small amount of initial data into TrieDB + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + // Use known hashed addresses for predictable trie structure + let hashed_addr1 = B256::from([0x01; 32]); // Simple known hash 1 + let hashed_addr2 = B256::from([0x02; 32]); // Simple known hash 2 + + let account1 = RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from([0xAA; 32])), + }; + + let account2 = RethAccount { nonce: 2, balance: U256::from(2000), bytecode_hash: None }; + + // Set accounts using the hashed addresses directly + initial_tx + .set_account(hashed_addr1, Some(account1.clone())) + .expect("Failed to set initial account 1"); + initial_tx + .set_account(hashed_addr2, Some(account2.clone())) + .expect("Failed to set initial account 2"); + initial_tx.commit().expect("Failed to commit initial data"); + + // Get baseline state root with seeded data + let baseline_tx = provider.tx().expect("Failed to create baseline RO transaction"); + let baseline_state_root = baseline_tx.state_root(); + // Phase 2: Create simple overlay state + let mut hashed_state = HashedPostState::default(); + + // Modify account1 - change nonce and balance (using the same known hashed address) + let modified_account1 = RethAccount { + nonce: 10, // Changed from 1 to 10 + balance: U256::from(5000), // Changed from 1000 to 5000 + bytecode_hash: Some(B256::from([0xBB; 32])), // Changed bytecode + }; + hashed_state.accounts.insert(hashed_addr1, Some(modified_account1.clone())); + + // Phase 3: Calculate state root WITH overlay + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state), + ); + let overlay_state_root = + state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + + // Verify overlay state root is different from baseline + assert_ne!( + overlay_state_root, baseline_state_root, + "Overlay state root should differ from baseline" + ); + + // Phase 4: Commit the overlay changes to TrieDB (making it authoritative) + let commit_tx = provider.tx_mut().expect("Failed to create RW transaction for commit"); + + // Apply the account modification using the hashed address + commit_tx + .set_account(hashed_addr1, Some(modified_account1)) + .expect("Failed to apply account change"); + commit_tx.commit().expect("Failed to commit overlay changes"); + + // Phase 5: Get state root from TrieDB after committing overlay (authoritative) + let final_tx = provider.tx().expect("Failed to create final RO transaction"); + let committed_state_root = final_tx.state_root(); + + // Phase 6: Calculate state root again with empty overlay (should match committed) + let empty_overlay_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::default(), + ); + let empty_overlay_state_root = empty_overlay_calc + .incremental_root() + .expect("Failed to calculate state root with empty overlay"); + + // Verify the state roots match + assert_eq!( + overlay_state_root, 
committed_state_root, + "State root with overlay should match TrieDB state root after committing overlay" + ); + assert_eq!( + committed_state_root, empty_overlay_state_root, + "TrieDB state root should match calculation with empty overlay" + ); + + // Verify final state root is different from baseline + assert_ne!( + committed_state_root, baseline_state_root, + "Final state root should be different from baseline after changes" + ); + } + + #[test] + fn test_state_root_with_known_values() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Create a simple known state + let mut hashed_state = HashedPostState::default(); + + // Use a simple address that will hash to a known value + let address = Address::ZERO; + let hashed_address = alloy_primitives::keccak256(address.0); + + let account = RethAccount { nonce: 0, balance: U256::ZERO, bytecode_hash: None }; + hashed_state.accounts.insert(hashed_address, Some(account)); + + // Calculate state root + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state), + ); + let state_root_result = state_root_calc.incremental_root(); + + assert!(state_root_result.is_ok()); + let state_root = state_root_result.unwrap(); + + // For a single empty account, we should get a predictable root + assert_eq!(state_root.len(), 32); // Valid B256 + assert_ne!(state_root, EMPTY_ROOT_HASH); // Should not be empty with an account + + // The exact root value depends on the trie implementation, but should be consistent + println!("State root for empty account: {:?}", state_root); + } + + #[test] + fn test_state_root_simple_debug() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Seed just 3 accounts into TrieDB + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + let accounts_data = vec![ + ( + B256::from([0x01; 32]), + RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from([0xA1; 32])), + }, + ), + ( + B256::from([0x02; 32]), + RethAccount { + nonce: 2, + balance: U256::from(2000), + bytecode_hash: Some(B256::from([0xA2; 32])), + }, + ), + ( + B256::from([0x03; 32]), + RethAccount { nonce: 3, balance: U256::from(3000), bytecode_hash: None }, + ), + ]; + + for (hashed_addr, account) in &accounts_data { + initial_tx + .set_account(*hashed_addr, Some(account.clone())) + .expect("Failed to set initial account"); + } + initial_tx.commit().expect("Failed to commit initial data"); + + // Get baseline state root + let baseline_tx = provider.tx().expect("Failed to create baseline RO transaction"); + let baseline_state_root = baseline_tx.state_root(); + println!("Baseline state root: {:?}", baseline_state_root); + + // Phase 2: Create overlay modifying just ONE account + let mut hashed_state = HashedPostState::default(); + + // Modify only account 1 + hashed_state.accounts.insert( + B256::from([0x01; 32]), + Some(RethAccount { + nonce: 10, // Changed from 1 + balance: U256::from(10000), // Changed from 1000 + bytecode_hash: Some(B256::from([0xB1; 32])), // Changed bytecode + }), + ); + + let trie_input = TrieInput::from_state(hashed_state); + + // Phase 3: Calculate state root with overlay + println!( + "DEBUG: Starting overlay state root calculation with {} accounts in overlay", + trie_input.state.accounts.len() + ); + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + trie_input, + ); + let overlay_state_root = 
+ state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + println!("Overlay state root: {:?}", overlay_state_root); + + // Verify overlay state root differs from baseline + assert_ne!( + overlay_state_root, baseline_state_root, + "Overlay state root should differ from baseline" + ); + + // Phase 4: Commit overlay to TrieDB (step by step) + let commit_tx = provider.tx_mut().expect("Failed to create RW transaction for commit"); + + // Apply just the single modification + commit_tx + .set_account( + B256::from([0x01; 32]), + Some(RethAccount { + nonce: 10, + balance: U256::from(10000), + bytecode_hash: Some(B256::from([0xB1; 32])), + }), + ) + .expect("Failed to apply account change"); + commit_tx.commit().expect("Failed to commit overlay changes"); + + // Phase 5: Verify consistency with detailed debugging + let final_tx = provider.tx().expect("Failed to create final RO transaction"); + let committed_state_root = final_tx.state_root(); + println!("Committed state root: {:?}", committed_state_root); + + // Debug what's actually in TrieDB after commit + // let debug_state_root = debug_triedb_contents(&provider, "TrieDB contents after commit"); + + let empty_overlay_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::default(), + ); + let empty_overlay_state_root = empty_overlay_calc + .incremental_root() + .expect("Failed to calculate empty overlay state root"); + println!("Empty overlay state root: {:?}", empty_overlay_state_root); + + // All three calculations should match + assert_eq!( + overlay_state_root, committed_state_root, + "State root with overlay should match TrieDB after commit" + ); + assert_eq!( + committed_state_root, empty_overlay_state_root, + "TrieDB state root should match empty overlay calculation" + ); + } + + #[test] + fn test_debug_missing_account_2() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Seed 3 accounts into TrieDB + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + let accounts_data = vec![ + ( + B256::from([0x01; 32]), + RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from([0xA1; 32])), + }, + ), + ( + B256::from([0x02; 32]), + RethAccount { + nonce: 2, + balance: U256::from(2000), + bytecode_hash: Some(B256::from([0xA2; 32])), + }, + ), + ( + B256::from([0x03; 32]), + RethAccount { nonce: 3, balance: U256::from(3000), bytecode_hash: None }, + ), + ]; + + for (hashed_addr, account) in &accounts_data { + initial_tx + .set_account(*hashed_addr, Some(account.clone())) + .expect("Failed to set initial account"); + } + initial_tx.commit().expect("Failed to commit initial data"); + + // Phase 2: Create overlay modifying only account 3 (not 1 or 2) + let mut hashed_state = HashedPostState::default(); + + // Modify only account 3 + hashed_state.accounts.insert( + B256::from([0x03; 32]), + Some(RethAccount { + nonce: 30, // Changed from 3 + balance: U256::from(30000), // Changed from 3000 + bytecode_hash: Some(B256::from([0xB3; 32])), // Added bytecode + }), + ); + + // Phase 3: Calculate state root with overlay + println!("=== Testing overlay with only account 3 modified ==="); + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state), + ); + let overlay_state_root = + state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + println!("Overlay state root: 
{:?}", overlay_state_root); + + // This should include accounts 1, 2 (from TrieDB) and 3 (from overlay) + // If account 2 is missing, we'll see only 2 accounts instead of 3 + } + + #[test] + fn test_debug_exact_failing_scenario() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Seed exactly 5 accounts like in the failing test + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + let accounts_data = vec![ + ( + B256::from([0x01; 32]), + RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from([0xA1; 32])), + }, + ), + ( + B256::from([0x02; 32]), + RethAccount { + nonce: 2, + balance: U256::from(2000), + bytecode_hash: Some(B256::from([0xA2; 32])), + }, + ), + ( + B256::from([0x03; 32]), + RethAccount { nonce: 3, balance: U256::from(3000), bytecode_hash: None }, + ), + ( + B256::from([0x04; 32]), + RethAccount { + nonce: 4, + balance: U256::from(4000), + bytecode_hash: Some(B256::from([0xA4; 32])), + }, + ), + ( + B256::from([0x05; 32]), + RethAccount { nonce: 5, balance: U256::from(5000), bytecode_hash: None }, + ), + ]; + + for (hashed_addr, account) in &accounts_data { + initial_tx + .set_account(*hashed_addr, Some(account.clone())) + .expect("Failed to set initial account"); + } + initial_tx.commit().expect("Failed to commit initial data"); + + // Phase 2: Create overlay modifying accounts 1, 3, and 5 (exactly like the failing test) + let mut hashed_state = HashedPostState::default(); + + hashed_state.accounts.insert( + B256::from([0x01; 32]), + Some(RethAccount { + nonce: 10, // Changed from 1 + balance: U256::from(10000), // Changed from 1000 + bytecode_hash: Some(B256::from([0xB1; 32])), // Changed bytecode + }), + ); + + hashed_state.accounts.insert( + B256::from([0x03; 32]), + Some(RethAccount { + nonce: 30, // Changed from 3 + balance: U256::from(30000), // Changed from 3000 + bytecode_hash: Some(B256::from([0xB3; 32])), // Added bytecode + }), + ); + + hashed_state.accounts.insert( + B256::from([0x05; 32]), + Some(RethAccount { + nonce: 50, // Changed from 5 + balance: U256::from(50000), // Changed from 5000 + bytecode_hash: None, // Unchanged + }), + ); + + // Phase 3: Calculate state root with overlay + println!("=== Testing exact failing scenario: 5 accounts, modify 1,3,5 ==="); + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state), + ); + let overlay_state_root = + state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + println!("Overlay state root: {:?}", overlay_state_root); + + // This should include ALL 5 accounts: + // 1 (from overlay), 2 (from TrieDB), 3 (from overlay), 4 (from TrieDB), 5 (from overlay) + // If account 2 is missing like in the failure, we'll see only 4 accounts + } + + #[test] + fn test_state_root_multiple_account_modifications() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Seed multiple accounts into TrieDB + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + // Create 5 initial accounts with known hashed addresses + let accounts_data = vec![ + ( + B256::from([0x01; 32]), + RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from([0xA1; 32])), + }, + ), + ( + B256::from([0x02; 32]), + RethAccount { + nonce: 2, + balance: U256::from(2000), + bytecode_hash: Some(B256::from([0xA2; 32])), + }, + ), + ( + B256::from([0x03; 32]), + 
RethAccount { nonce: 3, balance: U256::from(3000), bytecode_hash: None }, + ), + ( + B256::from([0x04; 32]), + RethAccount { + nonce: 4, + balance: U256::from(4000), + bytecode_hash: Some(B256::from([0xA4; 32])), + }, + ), + ( + B256::from([0x05; 32]), + RethAccount { nonce: 5, balance: U256::from(5000), bytecode_hash: None }, + ), + ]; + + for (hashed_addr, account) in &accounts_data { + initial_tx + .set_account(*hashed_addr, Some(account.clone())) + .expect("Failed to set initial account"); + } + initial_tx.commit().expect("Failed to commit initial data"); + + // Get baseline state root + let baseline_tx = provider.tx().expect("Failed to create baseline RO transaction"); + let baseline_state_root = baseline_tx.state_root(); + + // Phase 2: Create overlay modifying multiple accounts + let mut hashed_state = HashedPostState::default(); + + // Modify accounts 1, 3, and 5 + hashed_state.accounts.insert( + B256::from([0x01; 32]), + Some(RethAccount { + nonce: 10, // Changed from 1 + balance: U256::from(10000), // Changed from 1000 + bytecode_hash: Some(B256::from([0xB1; 32])), // Changed bytecode + }), + ); + + hashed_state.accounts.insert( + B256::from([0x03; 32]), + Some(RethAccount { + nonce: 30, // Changed from 3 + balance: U256::from(30000), // Changed from 3000 + bytecode_hash: Some(B256::from([0xB3; 32])), // Added bytecode + }), + ); + + hashed_state.accounts.insert( + B256::from([0x05; 32]), + Some(RethAccount { + nonce: 50, // Changed from 5 + balance: U256::from(50000), // Changed from 5000 + bytecode_hash: None, // Unchanged + }), + ); + + // Phase 3: Calculate state root with overlays + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state), + ); + let overlay_state_root = + state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + + // Verify overlay state root differs from baseline + assert_ne!( + overlay_state_root, baseline_state_root, + "Overlay state root should differ from baseline" + ); + + // Phase 4: Commit overlays to TrieDB (apply changes individually) + let commit_tx = provider.tx_mut().expect("Failed to create RW transaction for commit"); + + commit_tx + .set_account( + B256::from([0x01; 32]), + Some(RethAccount { + nonce: 10, + balance: U256::from(10000), + bytecode_hash: Some(B256::from([0xB1; 32])), + }), + ) + .expect("Failed to apply account 1 change"); + + commit_tx + .set_account( + B256::from([0x03; 32]), + Some(RethAccount { + nonce: 30, + balance: U256::from(30000), + bytecode_hash: Some(B256::from([0xB3; 32])), + }), + ) + .expect("Failed to apply account 3 change"); + + commit_tx + .set_account( + B256::from([0x05; 32]), + Some(RethAccount { nonce: 50, balance: U256::from(50000), bytecode_hash: None }), + ) + .expect("Failed to apply account 5 change"); + + commit_tx.commit().expect("Failed to commit overlay changes"); + + // Phase 5: Verify consistency + let final_tx = provider.tx().expect("Failed to create final RO transaction"); + let committed_state_root = final_tx.state_root(); + + let empty_overlay_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::default(), + ); + let empty_overlay_state_root = empty_overlay_calc + .incremental_root() + .expect("Failed to calculate empty overlay state root"); + + // All three calculations should match + assert_eq!( + overlay_state_root, committed_state_root, + "State root with overlay should match TrieDB after commit" + ); + assert_eq!( 
+ committed_state_root, empty_overlay_state_root, + "TrieDB state root should match empty overlay calculation" + ); + } + + #[test] + fn test_state_root_account_deletions() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Seed accounts into TrieDB + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + let accounts_to_delete = vec![ + ( + B256::from([0x10; 32]), + RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from([0xD1; 32])), + }, + ), + ( + B256::from([0x20; 32]), + RethAccount { nonce: 2, balance: U256::from(2000), bytecode_hash: None }, + ), + ]; + + let accounts_to_keep = [ + ( + B256::from([0x30; 32]), + RethAccount { + nonce: 3, + balance: U256::from(3000), + bytecode_hash: Some(B256::from([0xC1; 32])), + }, + ), + ( + B256::from([0x40; 32]), + RethAccount { nonce: 4, balance: U256::from(4000), bytecode_hash: None }, + ), + ]; + + for (hashed_addr, account) in accounts_to_delete.iter().chain(accounts_to_keep.iter()) { + initial_tx + .set_account(*hashed_addr, Some(account.clone())) + .expect("Failed to set initial account"); + } + + initial_tx.commit().expect("Failed to commit initial data"); + + // Get baseline state root + let baseline_tx = provider.tx().expect("Failed to create baseline RO transaction"); + let baseline_state_root = baseline_tx.state_root(); + + // Phase 2: Create overlay deleting some accounts + let mut hashed_state = HashedPostState::default(); + + // Delete the first two accounts (None = deletion) + for (hashed_addr, _) in &accounts_to_delete { + hashed_state.accounts.insert(*hashed_addr, None); + } + + // Phase 3: Calculate state root with deletions + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state), + ); + let overlay_state_root = + state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + + // Verify state root changed + assert_ne!( + overlay_state_root, baseline_state_root, + "State root should change after deletions" + ); + + // Phase 4: Commit deletions to TrieDB + let commit_tx = provider.tx_mut().expect("Failed to create RW transaction for commit"); + + for (hashed_addr, _) in &accounts_to_delete { + commit_tx.set_account(*hashed_addr, None).expect("Failed to delete account"); + } + commit_tx.commit().expect("Failed to commit deletion changes"); + + // Phase 5: Verify consistency + let final_tx = provider.tx().expect("Failed to create final RO transaction"); + let committed_state_root = final_tx.state_root(); + + let empty_overlay_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::default(), + ); + let empty_overlay_state_root = empty_overlay_calc + .incremental_root() + .expect("Failed to calculate empty overlay state root"); + + assert_eq!( + overlay_state_root, committed_state_root, + "State root with deletions should match TrieDB after commit" + ); + assert_eq!( + committed_state_root, empty_overlay_state_root, + "TrieDB state root should match empty overlay calculation" + ); + } + + #[test] + fn test_state_root_new_account_additions() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Seed some initial accounts + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + let existing_accounts = vec![ + ( + B256::from([0x11; 32]), + RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: 
Some(B256::from([0xE1; 32])), + }, + ), + ( + B256::from([0x22; 32]), + RethAccount { nonce: 2, balance: U256::from(2000), bytecode_hash: None }, + ), + ]; + + for (hashed_addr, account) in &existing_accounts { + initial_tx + .set_account(*hashed_addr, Some(account.clone())) + .expect("Failed to set initial account"); + } + + initial_tx.commit().expect("Failed to commit initial data"); + + // Get baseline state root + let baseline_tx = provider.tx().expect("Failed to create baseline RO transaction"); + let baseline_state_root = baseline_tx.state_root(); + + // Phase 2: Create overlay adding new accounts + let mut hashed_state = HashedPostState::default(); + + // Add new accounts that don't exist in TrieDB + let new_accounts = vec![ + ( + B256::from([0x33; 32]), + RethAccount { + nonce: 10, + balance: U256::from(10000), + bytecode_hash: Some(B256::from([0xC1; 32])), + }, + ), + ( + B256::from([0x44; 32]), + RethAccount { nonce: 20, balance: U256::from(20000), bytecode_hash: None }, + ), + ( + B256::from([0x55; 32]), + RethAccount { + nonce: 30, + balance: U256::from(30000), + bytecode_hash: Some(B256::from([0xC3; 32])), + }, + ), + ]; + + for (hashed_addr, account) in &new_accounts { + hashed_state.accounts.insert(*hashed_addr, Some(account.clone())); + } + + // Phase 3: Calculate state root with additions + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state), + ); + let overlay_state_root = + state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + + // Verify state root changed + assert_ne!( + overlay_state_root, baseline_state_root, + "State root should change after additions" + ); + + // Phase 4: Commit additions to TrieDB + let commit_tx = provider.tx_mut().expect("Failed to create RW transaction for commit"); + + for (hashed_addr, account) in &new_accounts { + commit_tx + .set_account(*hashed_addr, Some(account.clone())) + .expect("Failed to add new account"); + } + commit_tx.commit().expect("Failed to commit addition changes"); + + // Phase 5: Verify consistency + let final_tx = provider.tx().expect("Failed to create final RO transaction"); + let committed_state_root = final_tx.state_root(); + + let empty_overlay_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::default(), + ); + let empty_overlay_state_root = empty_overlay_calc + .incremental_root() + .expect("Failed to calculate empty overlay state root"); + + assert_eq!( + overlay_state_root, committed_state_root, + "State root with additions should match TrieDB after commit" + ); + assert_eq!( + committed_state_root, empty_overlay_state_root, + "TrieDB state root should match empty overlay calculation" + ); + } + + #[test] + fn test_state_root_mixed_operations() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Seed baseline accounts into TrieDB + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + let baseline_accounts = vec![ + ( + B256::from([0x01; 32]), + RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from([0xC1; 32])), + }, + ), + ( + B256::from([0x02; 32]), + RethAccount { nonce: 2, balance: U256::from(2000), bytecode_hash: None }, + ), + ( + B256::from([0x03; 32]), + RethAccount { + nonce: 3, + balance: U256::from(3000), + bytecode_hash: Some(B256::from([0xC3; 32])), + }, + ), + ]; + + for (hashed_addr, account) in &baseline_accounts { + 
initial_tx
+                .set_account(*hashed_addr, Some(account.clone()))
+                .expect("Failed to set baseline account");
+        }
+        initial_tx.commit().expect("Failed to commit initial data");
+
+        // Get baseline state root
+        let baseline_tx = provider.tx().expect("Failed to create baseline RO transaction");
+        let baseline_state_root = baseline_tx.state_root();
+
+        // Phase 2: Create overlay with mixed operations
+        let mut hashed_state = HashedPostState::default();
+
+        // UPDATE: Modify existing account 0x01
+        hashed_state.accounts.insert(
+            B256::from([0x01; 32]),
+            Some(RethAccount {
+                nonce: 100,                                  // Changed from 1
+                balance: U256::from(100000),                 // Changed from 1000
+                bytecode_hash: Some(B256::from([0xCF; 32])), // Changed bytecode
+            }),
+        );
+
+        // DELETE: Remove existing account 0x02
+        hashed_state.accounts.insert(B256::from([0x02; 32]), None);
+
+        // KEEP UNCHANGED: Account 0x03 not in overlay (should remain unchanged)
+
+        // ADD: Insert new accounts 0x04 and 0x05
+        hashed_state.accounts.insert(
+            B256::from([0x04; 32]),
+            Some(RethAccount {
+                nonce: 40,
+                balance: U256::from(40000),
+                bytecode_hash: Some(B256::from([0xC4; 32])),
+            }),
+        );
+
+        hashed_state.accounts.insert(
+            B256::from([0x05; 32]),
+            Some(RethAccount { nonce: 50, balance: U256::from(50000), bytecode_hash: None }),
+        );
+
+        // Phase 3: Calculate state root with mixed operations
+        let state_root_calc = TrieDbOverlayStateRoot::new(
+            provider.tx().expect("Failed to create RO transaction"),
+            TrieInput::from_state(hashed_state.clone()),
+        );
+        let overlay_state_root =
+            state_root_calc.incremental_root().expect("Failed to calculate overlay state root");
+
+        // Verify state root changed
+        assert_ne!(
+            overlay_state_root, baseline_state_root,
+            "State root should change after mixed operations"
+        );
+
+        // Phase 4: Commit mixed operations to TrieDB
+        let commit_tx = provider.tx_mut().expect("Failed to create RW transaction for commit");
+
+        // Apply all overlay changes
+        for (hashed_addr, account_opt) in hashed_state.accounts.iter() {
+            commit_tx
+                .set_account(*hashed_addr, account_opt.clone())
+                .expect("Failed to apply mixed operation");
+        }
+        commit_tx.commit().expect("Failed to commit mixed operation changes");
+
+        // Phase 5: Verify consistency
+        let final_tx = provider.tx().expect("Failed to create final RO transaction");
+        let committed_state_root = final_tx.state_root();
+
+        let empty_overlay_calc = TrieDbOverlayStateRoot::new(
+            provider.tx().expect("Failed to create RO transaction"),
+            TrieInput::default(),
+        );
+        let empty_overlay_state_root = empty_overlay_calc
+            .incremental_root()
+            .expect("Failed to calculate empty overlay state root");
+
+        assert_eq!(
+            overlay_state_root, committed_state_root,
+            "State root with mixed operations should match TrieDB after commit"
+        );
+        assert_eq!(
+            committed_state_root, empty_overlay_state_root,
+            "TrieDB state root should match empty overlay calculation"
+        );
+    }
+
+    #[test]
+    fn test_state_root_large_dataset() {
+        let (provider, _temp_dir) = create_test_triedb_provider();
+
+        // Phase 1: Seed a larger baseline of accounts (50 accounts)
+        let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction");
+
+        let num_baseline_accounts = 50;
+        for i in 0..num_baseline_accounts {
+            let mut addr_bytes = [0u8; 32];
+            addr_bytes[31] = i as u8; // Use last byte for differentiation
+            let hashed_addr = B256::from(addr_bytes);
+
+            let account = RethAccount {
+                nonce: i as u64,
+                balance: U256::from(i * 1000),
+                bytecode_hash: if i % 3 == 0 { Some(B256::from([i as u8; 32])) } else {
None }, + }; + + initial_tx + .set_account(hashed_addr, Some(account)) + .expect("Failed to set baseline account"); + } + initial_tx.commit().expect("Failed to commit initial data"); + + // Get baseline state root + let baseline_tx = provider.tx().expect("Failed to create baseline RO transaction"); + let baseline_state_root = baseline_tx.state_root(); + + // Phase 2: Create overlay modifying subset of accounts + let mut hashed_state = HashedPostState::default(); + + // Modify every 5th account (10 total modifications) + for i in (0..num_baseline_accounts).step_by(5) { + let mut addr_bytes = [0u8; 32]; + addr_bytes[31] = i as u8; + let hashed_addr = B256::from(addr_bytes); + + hashed_state.accounts.insert( + hashed_addr, + Some(RethAccount { + nonce: (i + 100) as u64, // Changed + balance: U256::from((i + 100) * 1000), // Changed + bytecode_hash: Some(B256::from([(i + 200) as u8; 32])), // Changed + }), + ); + } + + // Add some new accounts + for i in num_baseline_accounts..(num_baseline_accounts + 20) { + let mut addr_bytes = [0u8; 32]; + addr_bytes[31] = i as u8; + let hashed_addr = B256::from(addr_bytes); + + hashed_state.accounts.insert( + hashed_addr, + Some(RethAccount { + nonce: i as u64, + balance: U256::from(i * 2000), + bytecode_hash: if i % 2 == 0 { Some(B256::from([i as u8; 32])) } else { None }, + }), + ); + } + + // Phase 3: Calculate state root with large dataset + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state.clone()), + ); + let overlay_state_root = + state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + + // Verify state root changed + assert_ne!( + overlay_state_root, baseline_state_root, + "State root should change after large dataset operations" + ); + + // Phase 4: Commit changes to TrieDB + let commit_tx = provider.tx_mut().expect("Failed to create RW transaction for commit"); + + for (hashed_addr, account_opt) in &hashed_state.accounts { + commit_tx + .set_account(*hashed_addr, *account_opt) + .expect("Failed to apply large dataset change"); + } + commit_tx.commit().expect("Failed to commit large dataset changes"); + + // Phase 5: Verify consistency + let final_tx = provider.tx().expect("Failed to create final RO transaction"); + let committed_state_root = final_tx.state_root(); + + let empty_overlay_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::default(), + ); + let empty_overlay_state_root = empty_overlay_calc + .incremental_root() + .expect("Failed to calculate empty overlay state root"); + + assert_eq!( + overlay_state_root, committed_state_root, + "State root with large dataset should match TrieDB after commit" + ); + assert_eq!( + committed_state_root, empty_overlay_state_root, + "TrieDB state root should match empty overlay calculation" + ); + } + + #[test] + fn test_state_root_with_storage_overlay() { + let (provider, _temp_dir) = create_test_triedb_provider(); + + // Phase 1: Set up account with storage in TrieDB + let initial_tx = provider.tx_mut().expect("Failed to create initial RW transaction"); + + let hashed_address = B256::from([0x01; 32]); + let account = RethAccount { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from([0xAB; 32])), + }; + + initial_tx.set_account(hashed_address, Some(account)).expect("Failed to set account"); + + // Add some initial storage + let storage_key1 = B256::from([0x01; 32]); + let storage_value1 = 
U256::from(100); + let storage_key2 = B256::from([0x02; 32]); + let storage_value2 = U256::from(200); + + initial_tx + .set_storage_slot(hashed_address, storage_key1, Some(storage_value1)) + .expect("Failed to set storage 1"); + initial_tx + .set_storage_slot(hashed_address, storage_key2, Some(storage_value2)) + .expect("Failed to set storage 2"); + initial_tx.commit().expect("Failed to commit initial data"); + + // Get baseline state root + let baseline_tx = provider.tx().expect("Failed to create baseline RO transaction"); + let baseline_state_root = baseline_tx.state_root(); + + // Phase 2: Create overlay with storage modifications + let mut hashed_state = HashedPostState::default(); + + // Keep the account the same, but modify storage + hashed_state.accounts.insert(hashed_address, Some(account)); + + // Create storage overlay + let mut storage = HashedStorage::default(); + // Modify existing storage value + storage.storage.insert(storage_key1, U256::from(150)); // Changed from 100 to 150 + // Add new storage value + let storage_key3 = B256::from([0x03; 32]); + storage.storage.insert(storage_key3, U256::from(300)); + + hashed_state.storages.insert(hashed_address, storage); + + // Phase 3: Calculate state root with storage overlay + let state_root_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::from_state(hashed_state.clone()), + ); + let overlay_state_root = + state_root_calc.incremental_root().expect("Failed to calculate overlay state root"); + + // Verify state root changed due to storage modifications + assert_ne!( + overlay_state_root, baseline_state_root, + "State root should change after storage modifications" + ); + + // Phase 4: Commit storage changes to TrieDB + let commit_tx = provider.tx_mut().expect("Failed to create RW transaction for commit"); + + // Apply storage modifications + commit_tx + .set_storage_slot(hashed_address, storage_key1, Some(U256::from(150))) + .expect("Failed to modify storage 1"); + commit_tx + .set_storage_slot(hashed_address, storage_key3, Some(U256::from(300))) + .expect("Failed to add storage 3"); + + commit_tx.commit().expect("Failed to commit storage changes"); + + // Phase 5: Verify consistency + let final_tx = provider.tx().expect("Failed to create final RO transaction"); + let committed_state_root = final_tx.state_root(); + let account = final_tx + .get_account(AddressPath::new(Nibbles::unpack(hashed_address))) + .expect("Failed to get account"); + println!("committed account: {:?}", account); + + let empty_overlay_calc = TrieDbOverlayStateRoot::new( + provider.tx().expect("Failed to create RO transaction"), + TrieInput::default(), + ); + let empty_overlay_state_root = empty_overlay_calc + .incremental_root() + .expect("Failed to calculate empty overlay state root"); + + assert_eq!( + overlay_state_root, committed_state_root, + "State root with storage overlay should match TrieDB after commit" + ); + assert_eq!( + committed_state_root, empty_overlay_state_root, + "TrieDB state root should match empty overlay calculation" + ); + } +} diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 16388de91ae..c6120342543 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,9 +1,10 @@ use crate::{ + providers::TrieDbTransaction, traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, 
ChainSpecProvider, ChangeSetReader, HeaderProvider, PruneCheckpointReader,
     ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader,
-    StateRootProvider, TransactionVariant, TransactionsProvider,
+    StateRootProvider, TransactionVariant, TransactionsProvider, TrieDbTxProvider,
 };
 use alloy_consensus::{
     constants::EMPTY_ROOT_HASH,
@@ -860,6 +861,24 @@ where
     }
 }
+impl<T, ChainSpec> TrieDbTxProvider for MockEthProvider<T, ChainSpec>
+where
+    T: NodePrimitives,
+    ChainSpec: Send + Sync,
+{
+    fn triedb_tx_ref(&self) -> &TrieDbTransaction {
+        panic!("not implemented");
+    }
+
+    fn triedb_tx(&mut self) -> &mut TrieDbTransaction {
+        panic!("not implemented");
+    }
+
+    fn into_triedb_tx(self) -> TrieDbTransaction {
+        panic!("not implemented");
+    }
+}
+
 impl<T: NodePrimitives, ChainSpec: Send + Sync> HashedPostStateProvider for MockEthProvider<T, ChainSpec> {
diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs
index ccda2d60e85..43b0b0bd126 100644
--- a/crates/storage/provider/src/test_utils/mod.rs
+++ b/crates/storage/provider/src/test_utils/mod.rs
@@ -1,11 +1,13 @@
 use crate::{
-    providers::{ProviderNodeTypes, StaticFileProvider},
+    providers::{ProviderNodeTypes, StaticFileProvider, TrieDbProvider},
     HashingWriter, ProviderFactory, TrieWriter,
 };
 use alloy_primitives::B256;
 use reth_chainspec::{ChainSpec, MAINNET};
 use reth_db::{
-    test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase},
+    test_utils::{
+        create_test_rw_db, create_test_static_files_dir, create_test_triedb_dir, TempDatabase,
+    },
     DatabaseEnv,
 };
 use reth_errors::ProviderResult;
@@ -54,11 +56,13 @@ pub fn create_test_provider_factory_with_node_types<N: ProviderNodeTypes>(
     chain_spec: Arc<N::ChainSpec>,
 ) -> ProviderFactory<NodeTypesWithDBAdapter<N, Arc<TempDatabase<DatabaseEnv>>>> {
     let (static_dir, _) = create_test_static_files_dir();
+    let (triedb_dir, _) = create_test_triedb_dir();
     let db = create_test_rw_db();
     ProviderFactory::new(
         db,
         chain_spec,
         StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"),
+        TrieDbProvider::open(triedb_dir.keep()).unwrap(),
     )
 }
diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs
index 2837a4505fb..9f53cd7ad8b 100644
--- a/crates/storage/provider/src/traits/mod.rs
+++ b/crates/storage/provider/src/traits/mod.rs
@@ -8,5 +8,8 @@ pub use reth_chainspec::ChainSpecProvider;
 mod static_file_provider;
 pub use static_file_provider::StaticFileProviderFactory;
 
+mod triedb_provider;
+pub use triedb_provider::{TrieDbProviderFactory, TrieDbTxProvider};
+
 mod full;
 pub use full::FullProvider;
diff --git a/crates/storage/provider/src/traits/triedb_provider.rs b/crates/storage/provider/src/traits/triedb_provider.rs
new file mode 100644
index 00000000000..2da03761c52
--- /dev/null
+++ b/crates/storage/provider/src/traits/triedb_provider.rs
@@ -0,0 +1,29 @@
+#![allow(missing_docs)]
+
+use crate::providers::{TrieDbProvider, TrieDbTransaction};
+
+/// TrieDB provider factory.
+pub trait TrieDbProviderFactory {
+    /// Create a new instance of the TrieDB provider.
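+    ///
+    /// A minimal usage sketch (not compiled as a doc-test; how the factory is
+    /// obtained and wired into the node is assumed):
+    ///
+    /// ```ignore
+    /// // `factory` is any type implementing `TrieDbProviderFactory`.
+    /// let provider = factory.triedb_provider();
+    /// // Read-only transactions can then be used, e.g. to compute the state root.
+    /// let root = provider.tx()?.state_root();
+    /// ```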
+    fn triedb_provider(&self) -> TrieDbProvider;
+}
+
+/// Access to the TrieDB transaction backing a provider.
+pub trait TrieDbTxProvider {
+    /// Returns a shared reference to the underlying TrieDB transaction.
+    fn triedb_tx_ref(&self) -> &TrieDbTransaction;
+    /// Returns a mutable reference to the underlying TrieDB transaction.
+    fn triedb_tx(&mut self) -> &mut TrieDbTransaction;
+    /// Consumes the provider and returns the underlying TrieDB transaction.
+    fn into_triedb_tx(self) -> TrieDbTransaction;
+}
diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml
index 09ccd301192..17b438c95e1 100644
--- a/crates/trie/db/Cargo.toml
+++ b/crates/trie/db/Cargo.toml
@@ -32,6 +32,11 @@ reth-db = { workspace = true, features = ["test-utils"] }
 reth-provider = { workspace = true, features = ["test-utils"] }
 reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] }
 reth-trie = { workspace = true, features = ["test-utils"] }
+reth-trie-sparse = { workspace = true, features = ["test-utils"] }
+
+# Additional dependencies for backend testing
+tempfile.workspace = true
+triedb = { git = "https://github.com/base/triedb" }
 
 alloy-consensus.workspace = true
 alloy-rlp.workspace = true
diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml
index 9fb882b44a5..5ea21d04315 100644
--- a/crates/trie/parallel/Cargo.toml
+++ b/crates/trie/parallel/Cargo.toml
@@ -53,6 +53,7 @@ criterion.workspace = true
 proptest.workspace = true
 proptest-arbitrary-interop.workspace = true
 tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros"] }
+tempfile.workspace = true
 
 [features]
 default = ["metrics"]
diff --git a/crates/trie/parallel/src/metrics.rs b/crates/trie/parallel/src/metrics.rs
index a173ef07ae2..3c8861e7401 100644
--- a/crates/trie/parallel/src/metrics.rs
+++ b/crates/trie/parallel/src/metrics.rs
@@ -4,7 +4,7 @@ use reth_metrics::Metrics;
 use reth_trie::{metrics::TrieRootMetrics, TrieType};
 
 /// Parallel state root metrics.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct ParallelStateRootMetrics {
     /// State trie metrics.
     pub state_trie: TrieRootMetrics,
@@ -33,7 +33,7 @@ impl ParallelStateRootMetrics {
 }
 
 /// Parallel state root metrics.
-#[derive(Metrics)]
+#[derive(Clone, Metrics)]
 #[metrics(scope = "trie_parallel")]
 pub struct ParallelTrieMetrics {
     /// The number of storage roots computed in parallel.
diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs
index 433c13fb08f..787d304ae3d 100644
--- a/crates/trie/parallel/src/proof.rs
+++ b/crates/trie/parallel/src/proof.rs
@@ -259,7 +259,9 @@ mod tests {
     };
     use rand::Rng;
     use reth_primitives_traits::{Account, StorageEntry};
-    use reth_provider::{test_utils::create_test_provider_factory, HashingWriter};
+    use reth_provider::{
+        providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter,
+    };
     use reth_trie::proof::Proof;
     use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
     use tokio::runtime::Runtime;
diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs
index 8da4c28d91a..e4a8aadfa48 100644
--- a/crates/trie/parallel/src/proof_task.rs
+++ b/crates/trie/parallel/src/proof_task.rs
@@ -42,7 +42,9 @@ use alloy_rlp::{BufMut, Encodable};
 use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
 use dashmap::DashMap;
 use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind};
-use reth_provider::{DatabaseProviderROFactory, ProviderError, ProviderResult};
+use reth_provider::{
+    providers::triedb::TrieDbTransaction, DatabaseProviderROFactory, ProviderError, ProviderResult,
+};
 use reth_storage_errors::db::DatabaseError;
 use reth_trie::{
     hashed_cursor::HashedCursorFactory,
@@ -381,6 +383,17 @@ impl ProofTaskCtx {
     }
 }
 
+/// Transaction type for proof tasks that supports both Database and TrieDB backends
+#[derive(Debug)]
+pub enum ProofTaskTransaction<Tx> {
+    /// Database transaction
+    Database(Tx),
+    /// TrieDB transaction
+    TrieDb(TrieDbTransaction),
+    /// Both Database and TrieDB transactions
+    Both(Tx, TrieDbTransaction),
+}
+
 /// This contains all information shared between all storage proof instances.
 #[derive(Debug)]
 pub struct ProofTaskTx<Tx> {
diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs
index 5c9294e8f92..7613754576c 100644
--- a/crates/trie/parallel/src/root.rs
+++ b/crates/trie/parallel/src/root.rs
@@ -5,7 +5,7 @@ use alloy_primitives::B256;
 use alloy_rlp::{BufMut, Encodable};
 use itertools::Itertools;
 use reth_execution_errors::StorageRootError;
-use reth_provider::{DatabaseProviderROFactory, ProviderError};
+use reth_provider::{DatabaseProviderROFactory, ProviderError, TrieDbTxProvider};
 use reth_storage_errors::db::DatabaseError;
 use reth_trie::{
     hashed_cursor::HashedCursorFactory,
@@ -60,8 +60,9 @@ impl ParallelStateRoot<Factory> {
 impl<Factory> ParallelStateRoot<Factory>
 where
-    Factory: DatabaseProviderROFactory<Provider: TrieCursorFactory + HashedCursorFactory>
-        + Clone
+    Factory: DatabaseProviderROFactory<
+        Provider: TrieCursorFactory + HashedCursorFactory + TrieDbTxProvider,
+    > + Clone
         + Send
         + 'static,
 {
diff --git a/crates/triedb/Cargo.toml b/crates/triedb/Cargo.toml
new file mode 100644
index 00000000000..883148d0fca
--- /dev/null
+++ b/crates/triedb/Cargo.toml
@@ -0,0 +1,38 @@
+[package]
+name = "reth-triedb"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "TrieDB integration for Reth"
+
+[lints]
+workspace = true
+
+[dependencies]
+# Core dependencies
+reth-storage-errors = { workspace = true }
+reth-db-api = { workspace = true }
+reth-trie = { workspace = true }
+reth-trie-common = { workspace = true }
+reth-trie-db = { workspace = true }
+reth-primitives-traits = { workspace = true }
+reth-provider = { workspace = true }
+reth-storage-api = { workspace = true }
+
+# External TrieDB
+triedb = { git = "https://github.com/base/triedb" }
+
+# Alloy primitives
+alloy-primitives = { workspace = true }
+alloy-trie = { workspace = true }
+
+# Async and utilities
+tokio = { workspace = true, features = ["sync"] }
+tracing = { workspace = true }
+
+[dev-dependencies]
+reth-provider = { workspace = true, features = ["test-utils"] }
+tempfile = { workspace = true }
diff --git a/crates/triedb/src/lib.rs b/crates/triedb/src/lib.rs
new file mode 100644
index 00000000000..0d2c99a0a0d
--- /dev/null
+++ b/crates/triedb/src/lib.rs
@@ -0,0 +1,9 @@
+//! TrieDB integration for Reth
+//!
+//! This crate provides adapters and integrations to use TrieDB as the underlying
+//! storage backend for Reth's trie operations, replacing the traditional table-based
+//! approach for better performance and native merkle proof generation.
+
+pub mod provider;
+
+pub use provider::TrieDbProviderFactory;
diff --git a/crates/triedb/src/provider.rs b/crates/triedb/src/provider.rs
new file mode 100644
index 00000000000..7766492dc68
--- /dev/null
+++ b/crates/triedb/src/provider.rs
@@ -0,0 +1,98 @@
+//! TrieDB provider factory implementation
+//!
+//! This module provides the `TrieDbProviderFactory` that creates TrieDB-backed
+//! providers for use in Reth's multiproof system and sparse trie generation.
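+//!
+//! A rough usage sketch (not compiled as a doc-test; `datadir` and the error
+//! handling are placeholders):
+//!
+//! ```ignore
+//! let factory = TrieDbProviderFactory::new(TrieDbProvider::open(datadir.join("triedb"))?);
+//!
+//! // Read-only access, e.g. for proof generation.
+//! let ro = factory.provider_ro()?;
+//! let _tx = ro.tx();
+//!
+//! // Read-write access with an explicit commit.
+//! let rw = factory.provider_rw()?;
+//! rw.commit()?;
+//! ```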
+
+use reth_provider::providers::{TrieDbProvider, TrieDbTransaction};
+use reth_storage_errors::provider::ProviderResult;
+
+/// Factory for creating TrieDB-backed providers
+#[derive(Debug, Clone)]
+pub struct TrieDbProviderFactory {
+    provider: TrieDbProvider,
+}
+
+impl TrieDbProviderFactory {
+    /// Create a new TrieDB provider factory
+    pub fn new(provider: TrieDbProvider) -> Self {
+        Self { provider }
+    }
+
+    /// Get a read-only TrieDB provider
+    pub fn provider_ro(&self) -> ProviderResult<TrieDbProviderReadOnly> {
+        let tx = self.provider.tx()?;
+        Ok(TrieDbProviderReadOnly::new(tx))
+    }
+
+    /// Get a read-write TrieDB provider
+    pub fn provider_rw(&self) -> ProviderResult<TrieDbProviderReadWrite> {
+        let tx = self.provider.tx_mut()?;
+        Ok(TrieDbProviderReadWrite::new(tx))
+    }
+}
+
+/// Read-only TrieDB provider that implements both cursor factory traits
+#[derive(Debug)]
+pub struct TrieDbProviderReadOnly {
+    tx: TrieDbTransaction,
+}
+
+impl TrieDbProviderReadOnly {
+    /// Create a new read-only provider
+    pub fn new(tx: TrieDbTransaction) -> Self {
+        Self { tx }
+    }
+
+    /// Get the underlying transaction
+    pub fn tx(&self) -> &TrieDbTransaction {
+        &self.tx
+    }
+
+    /// Consume the provider and return the transaction
+    pub fn into_tx(self) -> TrieDbTransaction {
+        self.tx
+    }
+}
+
+/// Read-write TrieDB provider that implements both cursor factory traits
+#[derive(Debug)]
+pub struct TrieDbProviderReadWrite {
+    tx: TrieDbTransaction,
+}
+
+impl TrieDbProviderReadWrite {
+    /// Create a new read-write provider
+    pub fn new(tx: TrieDbTransaction) -> Self {
+        Self { tx }
+    }
+
+    /// Get the underlying transaction
+    pub fn tx(&self) -> &TrieDbTransaction {
+        &self.tx
+    }
+
+    /// Consume the provider and return the transaction
+    pub fn into_tx(self) -> TrieDbTransaction {
+        self.tx
+    }
+
+    /// Commit the transaction
+    pub fn commit(self) -> ProviderResult<()> {
+        self.tx.commit()
+    }
+}
diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml
index d6f41980dd8..e1bc8b878f9 100644
--- a/examples/custom-engine-types/Cargo.toml
+++ b/examples/custom-engine-types/Cargo.toml
@@ -6,6 +6,7 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
+reth-provider.workspace = true
 reth-payload-builder.workspace = true
 reth-basic-payload-builder.workspace = true
 reth-ethereum-payload-builder.workspace = true
diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs
index ca724e52af2..be36a50fd61 100644
--- a/examples/custom-engine-types/src/main.rs
+++ b/examples/custom-engine-types/src/main.rs
@@ -59,6 +59,7 @@ use reth_ethereum::{
 };
 use reth_ethereum_payload_builder::{EthereumBuilderConfig, EthereumExecutionPayloadValidator};
 use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError};
+use reth_provider::{DatabaseProviderFactory, TrieDbTxProvider};
 use reth_tracing::{RethTracer, Tracer};
 use serde::{Deserialize, Serialize};
 use std::{convert::Infallible, sync::Arc};
@@ -281,6 +282,7 @@ pub type MyNodeAddOns = RpcAddOns
 impl<N> Node<N> for MyCustomNode
 where
     N: FullNodeTypes,
+    <N::Provider as DatabaseProviderFactory>::Provider: TrieDbTxProvider,
 {
     type ComponentsBuilder = ComponentsBuilder<
         N,
diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml
index fe1f0006256..1a05af08356 100644
--- a/examples/custom-node/Cargo.toml
+++ b/examples/custom-node/Cargo.toml
@@ -7,6 +7,7 @@ license.workspace = true
 
 [dependencies]
 # reth
+reth-provider.workspace = true
 reth-chain-state.workspace = true
 reth-codecs.workspace = true
 reth-network-peers.workspace = true
diff --git a/examples/custom-node/src/lib.rs b/examples/custom-node/src/lib.rs
index 4210ac9b767..fddb16f5867 100644
--- a/examples/custom-node/src/lib.rs
+++ b/examples/custom-node/src/lib.rs
@@ -29,6 +29,7 @@ use reth_op::{
     },
     rpc::OpEthApiBuilder,
 };
+use reth_provider::{DatabaseProviderFactory, TrieDbTxProvider};
 
 pub mod chainspec;
 pub mod engine;
@@ -53,6 +54,7 @@ impl NodeTypes for CustomNode {
 impl<N> Node<N> for CustomNode
 where
     N: FullNodeTypes,
+    <N::Provider as DatabaseProviderFactory>::Provider: TrieDbTxProvider,
 {
     type ComponentsBuilder = ComponentsBuilder<
         N,
diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs
index 97bd1debdcc..6c2334ae035 100644
--- a/examples/rpc-db/src/main.rs
+++ b/examples/rpc-db/src/main.rs
@@ -24,7 +24,7 @@ use reth_ethereum::{
     pool::noop::NoopTransactionPool,
     provider::{
         db::{mdbx::DatabaseArguments, open_db_read_only, ClientVersion, DatabaseEnv},
-        providers::{BlockchainProvider, StaticFileProvider},
+        providers::{BlockchainProvider, StaticFileProvider, TrieDbProvider},
         ProviderFactory,
     },
     rpc::{
@@ -53,6 +53,7 @@ async fn main() -> eyre::Result<()> {
         db.clone(),
         spec.clone(),
         StaticFileProvider::read_only(db_path.join("static_files"), true)?,
+        TrieDbProvider::open(db_path.join("triedb"))?,
     );
 
     // 2. Set up the blockchain provider using only the database provider and a noop for the tree to
diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml
index e9cf465a98d..26e79da8605 100644
--- a/testing/ef-tests/Cargo.toml
+++ b/testing/ef-tests/Cargo.toml
@@ -34,6 +34,9 @@ reth-trie.workspace = true
 reth-trie-db.workspace = true
 revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] }
 
+# triedb
+triedb = { git = "https://github.com/base/triedb" }
+
 alloy-rlp.workspace = true
 alloy-primitives.workspace = true
 alloy-eips.workspace = true
diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs
index 49c49bf1936..7e4b48106b0 100644
--- a/testing/ef-tests/src/models.rs
+++ b/testing/ef-tests/src/models.rs
@@ -243,12 +243,12 @@ impl Account {
                 } else {
                     return Err(Error::Assertion(format!(
                         "Slot {slot:?} is missing from the database. Expected {value:?}"
-                    )))
+                    )));
                 }
             } else {
                 return Err(Error::Assertion(format!(
                     "Slot {slot:?} is missing from the database. Expected {value:?}"
-                )))
+                )));
             }
         }